Date:   Fri, 22 Apr 2022 15:57:43 +0100
From:   Martin Habets <habetsm.xilinx@...il.com>
To:     kuba@...nel.org, pabeni@...hat.com, davem@...emloft.net
Cc:     netdev@...r.kernel.org, ecree.xilinx@...il.com
Subject: [PATCH net-next 03/28] sfc: Copy shared files needed for Siena

From: Martin Habets <martinh@...inx.com>

No changes are made to the files here; those will be done in subsequent commits.

Signed-off-by: Martin Habets <habetsm.xilinx@...il.com>
---
 drivers/net/ethernet/sfc/siena/bitfield.h         |  614 +
 drivers/net/ethernet/sfc/siena/efx.c              | 1335 +
 drivers/net/ethernet/sfc/siena/efx.h              |  236 
 drivers/net/ethernet/sfc/siena/efx_channels.c     | 1352 +
 drivers/net/ethernet/sfc/siena/efx_channels.h     |   52 
 drivers/net/ethernet/sfc/siena/efx_common.c       | 1396 +
 drivers/net/ethernet/sfc/siena/efx_common.h       |  116 
 drivers/net/ethernet/sfc/siena/enum.h             |  176 
 drivers/net/ethernet/sfc/siena/ethtool.c          |  282 
 drivers/net/ethernet/sfc/siena/ethtool_common.c   | 1338 +
 drivers/net/ethernet/sfc/siena/ethtool_common.h   |   63 
 drivers/net/ethernet/sfc/siena/farch_regs.h       | 2929 +++
 drivers/net/ethernet/sfc/siena/filter.h           |  309 
 drivers/net/ethernet/sfc/siena/io.h               |  310 
 drivers/net/ethernet/sfc/siena/mcdi.c             | 2375 ++
 drivers/net/ethernet/sfc/siena/mcdi.h             |  388 
 drivers/net/ethernet/sfc/siena/mcdi_mon.c         |  531 +
 drivers/net/ethernet/sfc/siena/mcdi_pcol.h        |21968 +++++++++++++++++++++
 drivers/net/ethernet/sfc/siena/mcdi_port.c        |  117 
 drivers/net/ethernet/sfc/siena/mcdi_port.h        |   18 
 drivers/net/ethernet/sfc/siena/mcdi_port_common.c | 1301 +
 drivers/net/ethernet/sfc/siena/mcdi_port_common.h |   67 
 drivers/net/ethernet/sfc/siena/mtd.c              |  124 
 drivers/net/ethernet/sfc/siena/net_driver.h       | 1716 ++
 drivers/net/ethernet/sfc/siena/nic.c              |  580 +
 drivers/net/ethernet/sfc/siena/nic.h              |  392 
 drivers/net/ethernet/sfc/siena/nic_common.h       |  262 
 drivers/net/ethernet/sfc/siena/ptp.c              | 2210 ++
 drivers/net/ethernet/sfc/siena/ptp.h              |   45 
 drivers/net/ethernet/sfc/siena/rx.c               |  399 
 drivers/net/ethernet/sfc/siena/rx_common.c        | 1083 +
 drivers/net/ethernet/sfc/siena/rx_common.h        |  116 
 drivers/net/ethernet/sfc/siena/selftest.c         |  807 +
 drivers/net/ethernet/sfc/siena/selftest.h         |   52 
 drivers/net/ethernet/sfc/siena/sriov.c            |   72 
 drivers/net/ethernet/sfc/siena/sriov.h            |   25 
 drivers/net/ethernet/sfc/siena/tx.c               |  639 +
 drivers/net/ethernet/sfc/siena/tx.h               |   47 
 drivers/net/ethernet/sfc/siena/tx_common.c        |  447 
 drivers/net/ethernet/sfc/siena/tx_common.h        |   45 
 drivers/net/ethernet/sfc/siena/vfdi.h             |  252 
 drivers/net/ethernet/sfc/siena/workarounds.h      |   34 
 42 files changed, 46620 insertions(+)
 create mode 100644 drivers/net/ethernet/sfc/siena/bitfield.h
 create mode 100644 drivers/net/ethernet/sfc/siena/efx.c
 create mode 100644 drivers/net/ethernet/sfc/siena/efx.h
 create mode 100644 drivers/net/ethernet/sfc/siena/efx_channels.c
 create mode 100644 drivers/net/ethernet/sfc/siena/efx_channels.h
 create mode 100644 drivers/net/ethernet/sfc/siena/efx_common.c
 create mode 100644 drivers/net/ethernet/sfc/siena/efx_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/enum.h
 create mode 100644 drivers/net/ethernet/sfc/siena/ethtool.c
 create mode 100644 drivers/net/ethernet/sfc/siena/ethtool_common.c
 create mode 100644 drivers/net/ethernet/sfc/siena/ethtool_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/farch_regs.h
 create mode 100644 drivers/net/ethernet/sfc/siena/filter.h
 create mode 100644 drivers/net/ethernet/sfc/siena/io.h
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi.c
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi.h
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_mon.c
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_pcol.h
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port.c
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port.h
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port_common.c
 create mode 100644 drivers/net/ethernet/sfc/siena/mcdi_port_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/mtd.c
 create mode 100644 drivers/net/ethernet/sfc/siena/net_driver.h
 create mode 100644 drivers/net/ethernet/sfc/siena/nic.c
 create mode 100644 drivers/net/ethernet/sfc/siena/nic.h
 create mode 100644 drivers/net/ethernet/sfc/siena/nic_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/ptp.c
 create mode 100644 drivers/net/ethernet/sfc/siena/ptp.h
 create mode 100644 drivers/net/ethernet/sfc/siena/rx.c
 create mode 100644 drivers/net/ethernet/sfc/siena/rx_common.c
 create mode 100644 drivers/net/ethernet/sfc/siena/rx_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/selftest.c
 create mode 100644 drivers/net/ethernet/sfc/siena/selftest.h
 create mode 100644 drivers/net/ethernet/sfc/siena/sriov.c
 create mode 100644 drivers/net/ethernet/sfc/siena/sriov.h
 create mode 100644 drivers/net/ethernet/sfc/siena/tx.c
 create mode 100644 drivers/net/ethernet/sfc/siena/tx.h
 create mode 100644 drivers/net/ethernet/sfc/siena/tx_common.c
 create mode 100644 drivers/net/ethernet/sfc/siena/tx_common.h
 create mode 100644 drivers/net/ethernet/sfc/siena/vfdi.h
 create mode 100644 drivers/net/ethernet/sfc/siena/workarounds.h
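
For readers unfamiliar with these helpers, here is a minimal usage sketch of the
populate/extract macros defined in bitfield.h (the first file below). It is
illustrative only and not part of the patch: the function name is hypothetical,
and it assumes bitfield.h plus the usual kernel headers are available.

#include <linux/types.h>	/* u32, __le32 */
#include <linux/printk.h>	/* pr_debug() */
#include "bitfield.h"		/* efx_oword_t and the EFX_* macros below */

/* Hypothetical example, not part of this patch. */
static void example_bitfield_usage(void)
{
	efx_oword_t reg;
	u32 dword0;

	/* Populate two 32-bit fields of a 128-bit register image;
	 * bits not named here are zeroed by EFX_POPULATE_OWORD_*.
	 */
	EFX_POPULATE_OWORD_2(reg,
			     EFX_DWORD_0, 0x12345678,
			     EFX_DWORD_1, 0x9abcdef0);

	/* Read one named field back as a native-endian value. */
	dword0 = EFX_OWORD_FIELD(reg, EFX_DWORD_0);
	pr_debug("EFX_DWORD_0 = %#x\n", dword0);
}
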

diff --git a/drivers/net/ethernet/sfc/siena/bitfield.h b/drivers/net/ethernet/sfc/siena/bitfield.h
new file mode 100644
index 000000000000..1f981dfe4bdc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/bitfield.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_BITFIELD_H
+#define EFX_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide.  Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian.  Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and
+ * efx_dword_t) to be little-endian.
+ */
+
+/* Lowest bit numbers and widths */
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+#define EFX_QWORD_0_LBN 0
+#define EFX_QWORD_0_WIDTH 64
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EFX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EFX_MASK64(width)			\
+	((width) == 64 ? ~((u64) 0) :		\
+	 (((((u64) 1) << (width))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits.  Use
+ * EFX_MASK64 for higher width fields.
+ */
+#define EFX_MASK32(width)			\
+	((width) == 32 ? ~((u32) 0) :		\
+	 (((((u32) 1) << (width))) - 1))
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union efx_dword {
+	__le32 u32[1];
+} efx_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union efx_qword {
+	__le64 u64[1];
+	__le32 u32[2];
+	efx_dword_t dword[2];
+} efx_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union efx_oword {
+	__le64 u64[2];
+	efx_qword_t qword[2];
+	__le32 u32[4];
+	efx_dword_t dword[4];
+} efx_oword_t;
+
+/* Format string and value expanders for printk */
+#define EFX_DWORD_FMT "%08x"
+#define EFX_QWORD_FMT "%08x:%08x"
+#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EFX_DWORD_VAL(dword)				\
+	((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EFX_QWORD_VAL(qword)				\
+	((unsigned int) le32_to_cpu((qword).u32[1])),	\
+	((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EFX_OWORD_VAL(oword)				\
+	((unsigned int) le32_to_cpu((oword).u32[3])),	\
+	((unsigned int) le32_to_cpu((oword).u32[2])),	\
+	((unsigned int) le32_to_cpu((oword).u32[1])),	\
+	((unsigned int) le32_to_cpu((oword).u32[0]))
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ *   ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in in the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high)		\
+	((low) > (max) || (high) < (min) ? 0 :				\
+	 (low) > (min) ?						\
+	 (native_element) >> ((low) - (min)) :				\
+	 (native_element) << ((min) - (low)))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(element, min, max, low, high)			\
+	EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(element, min, max, low, high)			\
+	EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EFX_EXTRACT_OWORD64(oword, low, high)				\
+	((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |		\
+	  EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) &		\
+	 EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD64(qword, low, high)				\
+	(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) &		\
+	 EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_EXTRACT_OWORD32(oword, low, high)				\
+	((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |		\
+	  EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) &		\
+	 EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_QWORD32(qword, low, high)				\
+	((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |		\
+	  EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) &		\
+	 EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_EXTRACT_DWORD(dword, low, high)			\
+	(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) &	\
+	 EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_OWORD_FIELD64(oword, field)				\
+	EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD64(qword, field)				\
+	EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_FIELD32(oword, field)				\
+	EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
+
+#define EFX_QWORD_FIELD32(qword, field)				\
+	EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field),		\
+			    EFX_HIGH_BIT(field))
+
+#define EFX_DWORD_FIELD(dword, field)				\
+	EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field),		\
+			  EFX_HIGH_BIT(field))
+
+#define EFX_OWORD_IS_ZERO64(oword)					\
+	(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EFX_QWORD_IS_ZERO64(qword)					\
+	(((qword).u64[0]) == (__force __le64) 0)
+
+#define EFX_OWORD_IS_ZERO32(oword)					     \
+	(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+	 == (__force __le32) 0)
+
+#define EFX_QWORD_IS_ZERO32(qword)					\
+	(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EFX_DWORD_IS_ZERO(dword)					\
+	(((dword).u32[0]) == (__force __le32) 0)
+
+#define EFX_OWORD_IS_ALL_ONES64(oword)					\
+	(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EFX_QWORD_IS_ALL_ONES64(qword)					\
+	((qword).u64[0] == ~((__force __le64) 0))
+
+#define EFX_OWORD_IS_ALL_ONES32(oword)					\
+	(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+	 == ~((__force __le32) 0))
+
+#define EFX_QWORD_IS_ALL_ONES32(qword)					\
+	(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EFX_DWORD_IS_ALL_ONES(dword)					\
+	((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EFX_OWORD_FIELD		EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD		EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_ZERO	EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO	EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_ALL_ONES	EFX_OWORD_IS_ALL_ONES64
+#define EFX_QWORD_IS_ALL_ONES	EFX_QWORD_IS_ALL_ONES64
+#else
+#define EFX_OWORD_FIELD		EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD		EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_ZERO	EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO	EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_ALL_ONES	EFX_OWORD_IS_ALL_ONES32
+#define EFX_QWORD_IS_ALL_ONES	EFX_QWORD_IS_ALL_ONES32
+#endif
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+#define EFX_INSERT_NATIVE64(min, max, low, high, value)		\
+	(((low > max) || (high < min)) ? 0 :			\
+	 ((low > min) ?						\
+	  (((u64) (value)) << (low - min)) :		\
+	  (((u64) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE32(min, max, low, high, value)		\
+	(((low > max) || (high < min)) ? 0 :			\
+	 ((low > min) ?						\
+	  (((u32) (value)) << (low - min)) :		\
+	  (((u32) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE(min, max, low, high, value)		\
+	((((max - min) >= 32) || ((high - low) >= 32)) ?	\
+	 EFX_INSERT_NATIVE64(min, max, low, high, value) :	\
+	 EFX_INSERT_NATIVE32(min, max, low, high, value))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE(min, max, field, value)		\
+	EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field),		\
+			  EFX_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS_NATIVE(min, max,				\
+				 field1, value1,			\
+				 field2, value2,			\
+				 field3, value3,			\
+				 field4, value4,			\
+				 field5, value5,			\
+				 field6, value6,			\
+				 field7, value7,			\
+				 field8, value8,			\
+				 field9, value9,			\
+				 field10, value10,			\
+				 field11, value11,			\
+				 field12, value12,			\
+				 field13, value13,			\
+				 field14, value14,			\
+				 field15, value15,			\
+				 field16, value16,			\
+				 field17, value17,			\
+				 field18, value18,			\
+				 field19, value19)			\
+	(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) |	\
+	 EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
+
+#define EFX_INSERT_FIELDS64(...)				\
+	cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELDS32(...)				\
+	cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_POPULATE_OWORD64(oword, ...) do {				\
+	(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);	\
+	(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__);	\
+	} while (0)
+
+#define EFX_POPULATE_QWORD64(qword, ...) do {				\
+	(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);	\
+	} while (0)
+
+#define EFX_POPULATE_OWORD32(oword, ...) do {				\
+	(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
+	(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);	\
+	(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__);	\
+	(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__);	\
+	} while (0)
+
+#define EFX_POPULATE_QWORD32(qword, ...) do {				\
+	(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
+	(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);	\
+	} while (0)
+
+#define EFX_POPULATE_DWORD(dword, ...) do {				\
+	(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);	\
+	} while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#else
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_18(oword, ...) \
+	EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_17(oword, ...) \
+	EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_16(oword, ...) \
+	EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_15(oword, ...) \
+	EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_14(oword, ...) \
+	EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_13(oword, ...) \
+	EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_12(oword, ...) \
+	EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_11(oword, ...) \
+	EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_10(oword, ...) \
+	EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_9(oword, ...) \
+	EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_8(oword, ...) \
+	EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_7(oword, ...) \
+	EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_6(oword, ...) \
+	EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_5(oword, ...) \
+	EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_4(oword, ...) \
+	EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_3(oword, ...) \
+	EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_2(oword, ...) \
+	EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_1(oword, ...) \
+	EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_OWORD(oword) \
+	EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_OWORD(oword) \
+	EFX_POPULATE_OWORD_4(oword, \
+			     EFX_DWORD_0, 0xffffffff, \
+			     EFX_DWORD_1, 0xffffffff, \
+			     EFX_DWORD_2, 0xffffffff, \
+			     EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_18(qword, ...) \
+	EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_17(qword, ...) \
+	EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_16(qword, ...) \
+	EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_15(qword, ...) \
+	EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_14(qword, ...) \
+	EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_13(qword, ...) \
+	EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_12(qword, ...) \
+	EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_11(qword, ...) \
+	EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_10(qword, ...) \
+	EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_9(qword, ...) \
+	EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_8(qword, ...) \
+	EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_7(qword, ...) \
+	EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_6(qword, ...) \
+	EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_5(qword, ...) \
+	EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_4(qword, ...) \
+	EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_3(qword, ...) \
+	EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_2(qword, ...) \
+	EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_1(qword, ...) \
+	EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_QWORD(qword) \
+	EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_QWORD(qword) \
+	EFX_POPULATE_QWORD_2(qword, \
+			     EFX_DWORD_0, 0xffffffff, \
+			     EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_18(dword, ...) \
+	EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_17(dword, ...) \
+	EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_16(dword, ...) \
+	EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_15(dword, ...) \
+	EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_14(dword, ...) \
+	EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_13(dword, ...) \
+	EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_12(dword, ...) \
+	EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_11(dword, ...) \
+	EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_10(dword, ...) \
+	EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_9(dword, ...) \
+	EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_8(dword, ...) \
+	EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_7(dword, ...) \
+	EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_6(dword, ...) \
+	EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_5(dword, ...) \
+	EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_4(dword, ...) \
+	EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_3(dword, ...) \
+	EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_2(dword, ...) \
+	EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_1(dword, ...) \
+	EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_DWORD(dword) \
+	EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_DWORD(dword) \
+	EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure.  Used
+ * for read-modify-write operations.
+ *
+ */
+#define EFX_INVERT_OWORD(oword) do {		\
+	(oword).u64[0] = ~((oword).u64[0]);	\
+	(oword).u64[1] = ~((oword).u64[1]);	\
+	} while (0)
+
+#define EFX_AND_OWORD(oword, from, mask)			\
+	do {							\
+		(oword).u64[0] = (from).u64[0] & (mask).u64[0];	\
+		(oword).u64[1] = (from).u64[1] & (mask).u64[1];	\
+	} while (0)
+
+#define EFX_AND_QWORD(qword, from, mask)			\
+		(qword).u64[0] = (from).u64[0] & (mask).u64[0]
+
+#define EFX_OR_OWORD(oword, from, mask)				\
+	do {							\
+		(oword).u64[0] = (from).u64[0] | (mask).u64[0];	\
+		(oword).u64[1] = (from).u64[1] | (mask).u64[1];	\
+	} while (0)
+
+#define EFX_INSERT64(min, max, low, high, value)			\
+	cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INSERT32(min, max, low, high, value)			\
+	cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+#define EFX_INPLACE_MASK64(min, max, low, high)				\
+	EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
+
+#define EFX_INPLACE_MASK32(min, max, low, high)				\
+	EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
+
+#define EFX_SET_OWORD64(oword, low, high, value) do {			\
+	(oword).u64[0] = (((oword).u64[0]				\
+			   & ~EFX_INPLACE_MASK64(0,  63, low, high))	\
+			  | EFX_INSERT64(0,  63, low, high, value));	\
+	(oword).u64[1] = (((oword).u64[1]				\
+			   & ~EFX_INPLACE_MASK64(64, 127, low, high))	\
+			  | EFX_INSERT64(64, 127, low, high, value));	\
+	} while (0)
+
+#define EFX_SET_QWORD64(qword, low, high, value) do {			\
+	(qword).u64[0] = (((qword).u64[0]				\
+			   & ~EFX_INPLACE_MASK64(0, 63, low, high))	\
+			  | EFX_INSERT64(0, 63, low, high, value));	\
+	} while (0)
+
+#define EFX_SET_OWORD32(oword, low, high, value) do {			\
+	(oword).u32[0] = (((oword).u32[0]				\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
+	(oword).u32[1] = (((oword).u32[1]				\
+			   & ~EFX_INPLACE_MASK32(32, 63, low, high))	\
+			  | EFX_INSERT32(32, 63, low, high, value));	\
+	(oword).u32[2] = (((oword).u32[2]				\
+			   & ~EFX_INPLACE_MASK32(64, 95, low, high))	\
+			  | EFX_INSERT32(64, 95, low, high, value));	\
+	(oword).u32[3] = (((oword).u32[3]				\
+			   & ~EFX_INPLACE_MASK32(96, 127, low, high))	\
+			  | EFX_INSERT32(96, 127, low, high, value));	\
+	} while (0)
+
+#define EFX_SET_QWORD32(qword, low, high, value) do {			\
+	(qword).u32[0] = (((qword).u32[0]				\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
+	(qword).u32[1] = (((qword).u32[1]				\
+			   & ~EFX_INPLACE_MASK32(32, 63, low, high))	\
+			  | EFX_INSERT32(32, 63, low, high, value));	\
+	} while (0)
+
+#define EFX_SET_DWORD32(dword, low, high, value) do {			\
+	(dword).u32[0] = (((dword).u32[0]				\
+			   & ~EFX_INPLACE_MASK32(0, 31, low, high))	\
+			  | EFX_INSERT32(0, 31, low, high, value));	\
+	} while (0)
+
+#define EFX_SET_OWORD_FIELD64(oword, field, value)			\
+	EFX_SET_OWORD64(oword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value)			\
+	EFX_SET_QWORD64(qword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value)			\
+	EFX_SET_OWORD32(oword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value)			\
+	EFX_SET_QWORD32(qword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value)			\
+	EFX_SET_DWORD32(dword, EFX_LOW_BIT(field),			\
+			 EFX_HIGH_BIT(field), value)
+
+
+
+#if BITS_PER_LONG == 64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#else
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#endif
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH	(8 * sizeof(dma_addr_t))
+#define EFX_DMA_TYPE_WIDTH(width) \
+	(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+
+
+/* Static initialiser */
+#define EFX_OWORD32(a, b, c, d)				\
+	{ .u32 = { cpu_to_le32(a), cpu_to_le32(b),	\
+		   cpu_to_le32(c), cpu_to_le32(d) } }
+
+#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
new file mode 100644
index 000000000000..5e7fe75cb1d4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -0,0 +1,1335 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/filter.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/topology.h>
+#include <linux/gfp.h>
+#include <linux/aer.h>
+#include <linux/interrupt.h>
+#include "net_driver.h"
+#include <net/gre.h>
+#include <net/udp_tunnel.h>
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "ef100.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "io.h"
+#include "selftest.h"
+#include "sriov.h"
+
+#include "mcdi_port_common.h"
+#include "mcdi_pcol.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+module_param_named(interrupt_mode, efx_interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us
+ * to control interrupt affinity separately for TX and RX.
+ *
+ * This is only used in MSI-X interrupt mode
+ */
+bool efx_separate_tx_channels;
+module_param(efx_separate_tx_channels, bool, 0444);
+MODULE_PARM_DESC(efx_separate_tx_channels,
+		 "Use separate channels for TX and RX");
+
+/* Initial interrupt moderation settings.  They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings.  They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full.  A queue is
+ * restarted when it drops below half full.  The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors) is
+ *   512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+static bool phy_flash_cfg;
+module_param(phy_flash_cfg, bool, 0644);
+MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
+
+static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+
+static void efx_remove_port(struct efx_nic *efx);
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog);
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx)		\
+	do {						\
+		if ((efx->state == STATE_READY) ||	\
+		    (efx->state == STATE_RECOVERY) ||	\
+		    (efx->state == STATE_DISABLED))	\
+			ASSERT_RTNL();			\
+	} while (0)
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+static void efx_fini_port(struct efx_nic *efx);
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+	int rc;
+
+	netif_dbg(efx, probe, efx->net_dev, "create port\n");
+
+	if (phy_flash_cfg)
+		efx->phy_mode = PHY_MODE_SPECIAL;
+
+	/* Connect up MAC/PHY operations table */
+	rc = efx->type->probe_port(efx);
+	if (rc)
+		return rc;
+
+	/* Initialise MAC address to permanent address */
+	eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
+
+	return 0;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+	int rc;
+
+	netif_dbg(efx, drv, efx->net_dev, "init port\n");
+
+	mutex_lock(&efx->mac_lock);
+
+	efx->port_initialized = true;
+
+	/* Ensure the PHY advertises the correct flow control settings */
+	rc = efx_mcdi_port_reconfigure(efx);
+	if (rc && rc != -EPERM)
+		goto fail;
+
+	mutex_unlock(&efx->mac_lock);
+	return 0;
+
+fail:
+	mutex_unlock(&efx->mac_lock);
+	return rc;
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
+
+	if (!efx->port_initialized)
+		return;
+
+	efx->port_initialized = false;
+
+	efx->link_state.up = false;
+	efx_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
+
+	efx->type->remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+static LIST_HEAD(efx_primary_list);
+static LIST_HEAD(efx_unassociated_list);
+
+static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
+{
+	return left->type == right->type &&
+		left->vpd_sn && right->vpd_sn &&
+		!strcmp(left->vpd_sn, right->vpd_sn);
+}
+
+static void efx_associate(struct efx_nic *efx)
+{
+	struct efx_nic *other, *next;
+
+	if (efx->primary == efx) {
+		/* Adding primary function; look for secondaries */
+
+		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
+		list_add_tail(&efx->node, &efx_primary_list);
+
+		list_for_each_entry_safe(other, next, &efx_unassociated_list,
+					 node) {
+			if (efx_same_controller(efx, other)) {
+				list_del(&other->node);
+				netif_dbg(other, probe, other->net_dev,
+					  "moving to secondary list of %s %s\n",
+					  pci_name(efx->pci_dev),
+					  efx->net_dev->name);
+				list_add_tail(&other->node,
+					      &efx->secondary_list);
+				other->primary = efx;
+			}
+		}
+	} else {
+		/* Adding secondary function; look for primary */
+
+		list_for_each_entry(other, &efx_primary_list, node) {
+			if (efx_same_controller(efx, other)) {
+				netif_dbg(efx, probe, efx->net_dev,
+					  "adding to secondary list of %s %s\n",
+					  pci_name(other->pci_dev),
+					  other->net_dev->name);
+				list_add_tail(&efx->node,
+					      &other->secondary_list);
+				efx->primary = other;
+				return;
+			}
+		}
+
+		netif_dbg(efx, probe, efx->net_dev,
+			  "adding to unassociated list\n");
+		list_add_tail(&efx->node, &efx_unassociated_list);
+	}
+}
+
+static void efx_dissociate(struct efx_nic *efx)
+{
+	struct efx_nic *other, *next;
+
+	list_del(&efx->node);
+	efx->primary = NULL;
+
+	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
+		list_del(&other->node);
+		netif_dbg(other, probe, other->net_dev,
+			  "moving to unassociated list\n");
+		list_add_tail(&other->node, &efx_unassociated_list);
+		other->primary = NULL;
+	}
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+	int rc;
+
+	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
+
+	/* Carry out hardware-type specific initialisation */
+	rc = efx->type->probe(efx);
+	if (rc)
+		return rc;
+
+	do {
+		if (!efx->max_channels || !efx->max_tx_channels) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Insufficient resources to allocate"
+				  " any channels\n");
+			rc = -ENOSPC;
+			goto fail1;
+		}
+
+		/* Determine the number of channels and queues by trying
+		 * to hook in MSI-X interrupts.
+		 */
+		rc = efx_probe_interrupts(efx);
+		if (rc)
+			goto fail1;
+
+		rc = efx_set_channels(efx);
+		if (rc)
+			goto fail1;
+
+		/* dimension_resources can fail with EAGAIN */
+		rc = efx->type->dimension_resources(efx);
+		if (rc != 0 && rc != -EAGAIN)
+			goto fail2;
+
+		if (rc == -EAGAIN)
+			/* try again with new max_channels */
+			efx_remove_interrupts(efx);
+
+	} while (rc == -EAGAIN);
+
+	if (efx->n_channels > 1)
+		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
+				    sizeof(efx->rss_context.rx_hash_key));
+	efx_set_default_rx_indir_table(efx, &efx->rss_context);
+
+	/* Initialise the interrupt moderation settings */
+	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
+	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
+				true);
+
+	return 0;
+
+fail2:
+	efx_remove_interrupts(efx);
+fail1:
+	efx->type->remove(efx);
+	return rc;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
+
+	efx_remove_interrupts(efx);
+	efx->type->remove(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_probe_nic(efx);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
+		goto fail1;
+	}
+
+	rc = efx_probe_port(efx);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
+		goto fail2;
+	}
+
+	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
+	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
+		rc = -EINVAL;
+		goto fail3;
+	}
+
+#ifdef CONFIG_SFC_SRIOV
+	rc = efx->type->vswitching_probe(efx);
+	if (rc) /* not fatal; the PF will still work fine */
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to setup vswitching rc=%d;"
+			   " VFs may not function\n", rc);
+#endif
+
+	rc = efx_probe_filters(efx);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to create filter tables\n");
+		goto fail4;
+	}
+
+	rc = efx_probe_channels(efx);
+	if (rc)
+		goto fail5;
+
+	return 0;
+
+ fail5:
+	efx_remove_filters(efx);
+ fail4:
+#ifdef CONFIG_SFC_SRIOV
+	efx->type->vswitching_remove(efx);
+#endif
+ fail3:
+	efx_remove_port(efx);
+ fail2:
+	efx_remove_nic(efx);
+ fail1:
+	return rc;
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+	rtnl_lock();
+	efx_xdp_setup_prog(efx, NULL);
+	rtnl_unlock();
+
+	efx_remove_channels(efx);
+	efx_remove_filters(efx);
+#ifdef CONFIG_SFC_SRIOV
+	efx->type->vswitching_remove(efx);
+#endif
+	efx_remove_port(efx);
+	efx_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
+{
+	if (usecs == 0)
+		return 0;
+	if (usecs * 1000 < efx->timer_quantum_ns)
+		return 1; /* never round down to 0 */
+	return usecs * 1000 / efx->timer_quantum_ns;
+}
+
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
+{
+	/* We must round up when converting ticks to microseconds
+	 * because we round down when converting the other way.
+	 */
+	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
+}
+
+/* Set interrupt moderation parameters */
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+			    unsigned int rx_usecs, bool rx_adaptive,
+			    bool rx_may_override_tx)
+{
+	struct efx_channel *channel;
+	unsigned int timer_max_us;
+
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	timer_max_us = efx->timer_max_ns / 1000;
+
+	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
+		return -EINVAL;
+
+	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
+	    !rx_may_override_tx) {
+		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
+			  "RX and TX IRQ moderation must be equal\n");
+		return -EINVAL;
+	}
+
+	efx->irq_rx_adaptive = rx_adaptive;
+	efx->irq_rx_moderation_us = rx_usecs;
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_rx_queue(channel))
+			channel->irq_moderation_us = rx_usecs;
+		else if (efx_channel_has_tx_queues(channel))
+			channel->irq_moderation_us = tx_usecs;
+		else if (efx_channel_is_xdp_tx(channel))
+			channel->irq_moderation_us = tx_usecs;
+	}
+
+	return 0;
+}
+
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+			    unsigned int *rx_usecs, bool *rx_adaptive)
+{
+	*rx_adaptive = efx->irq_rx_adaptive;
+	*rx_usecs = efx->irq_rx_moderation_us;
+
+	/* If channels are shared between RX and TX, so is IRQ
+	 * moderation.  Otherwise, IRQ moderation is the same for all
+	 * TX channels and is not adaptive.
+	 */
+	if (efx->tx_channel_offset == 0) {
+		*tx_usecs = *rx_usecs;
+	} else {
+		struct efx_channel *tx_channel;
+
+		tx_channel = efx->channel[efx->tx_channel_offset];
+		*tx_usecs = tx_channel->irq_moderation_us;
+	}
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (cmd == SIOCSHWTSTAMP)
+		return efx_ptp_set_ts_config(efx, ifr);
+	if (cmd == SIOCGHWTSTAMP)
+		return efx_ptp_get_ts_config(efx, ifr);
+
+	/* Convert phy_id from older PRTAD/DEVAD format */
+	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
+	    (data->phy_id & 0xfc00) == 0x0400)
+		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
+
+	return mdio_mii_ioctl(&efx->mdio, data, cmd);
+}
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+int efx_net_open(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
+		  raw_smp_processor_id());
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
+	if (efx->phy_mode & PHY_MODE_SPECIAL)
+		return -EBUSY;
+	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
+		return -EIO;
+
+	/* Notify the kernel of the link state polled during driver load,
+	 * before the monitor starts running */
+	efx_link_status_changed(efx);
+
+	efx_start_all(efx);
+	if (efx->state == STATE_DISABLED || efx->reset_pending)
+		netif_device_detach(efx->net_dev);
+	efx_selftest_async_start(efx);
+	return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+int efx_net_stop(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
+		  raw_smp_processor_id());
+
+	/* Stop the device and flush all the channels */
+	efx_stop_all(efx);
+
+	return 0;
+}
+
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->vlan_rx_add_vid)
+		return efx->type->vlan_rx_add_vid(efx, proto, vid);
+	else
+		return -EOPNOTSUPP;
+}
+
+static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->vlan_rx_kill_vid)
+		return efx->type->vlan_rx_kill_vid(efx, proto, vid);
+	else
+		return -EOPNOTSUPP;
+}
+
+static const struct net_device_ops efx_netdev_ops = {
+	.ndo_open		= efx_net_open,
+	.ndo_stop		= efx_net_stop,
+	.ndo_get_stats64	= efx_net_stats,
+	.ndo_tx_timeout		= efx_watchdog,
+	.ndo_start_xmit		= efx_hard_start_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_eth_ioctl		= efx_ioctl,
+	.ndo_change_mtu		= efx_change_mtu,
+	.ndo_set_mac_address	= efx_set_mac_address,
+	.ndo_set_rx_mode	= efx_set_rx_mode,
+	.ndo_set_features	= efx_set_features,
+	.ndo_features_check	= efx_features_check,
+	.ndo_vlan_rx_add_vid	= efx_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	= efx_vlan_rx_kill_vid,
+#ifdef CONFIG_SFC_SRIOV
+	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
+	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
+	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
+	.ndo_get_vf_config	= efx_sriov_get_vf_config,
+	.ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
+#endif
+	.ndo_get_phys_port_id   = efx_get_phys_port_id,
+	.ndo_get_phys_port_name	= efx_get_phys_port_name,
+	.ndo_setup_tc		= efx_setup_tc,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	= efx_filter_rfs,
+#endif
+	.ndo_xdp_xmit		= efx_xdp_xmit,
+	.ndo_bpf		= efx_xdp
+};
+
+static int efx_xdp_setup_prog(struct efx_nic *efx, struct bpf_prog *prog)
+{
+	struct bpf_prog *old_prog;
+
+	if (efx->xdp_rxq_info_failed) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Unable to bind XDP program due to previous failure of rxq_info\n");
+		return -EINVAL;
+	}
+
+	if (prog && efx->net_dev->mtu > efx_xdp_max_mtu(efx)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Unable to configure XDP with MTU of %d (max: %d)\n",
+			  efx->net_dev->mtu, efx_xdp_max_mtu(efx));
+		return -EINVAL;
+	}
+
+	old_prog = rtnl_dereference(efx->xdp_prog);
+	rcu_assign_pointer(efx->xdp_prog, prog);
+	/* Release the reference that was originally passed by the caller. */
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	return 0;
+}
+
+/* Context: process, rtnl_lock() held. */
+static int efx_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+	struct efx_nic *efx = netdev_priv(dev);
+
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return efx_xdp_setup_prog(efx, xdp->prog);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int efx_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **xdpfs,
+			u32 flags)
+{
+	struct efx_nic *efx = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	return efx_xdp_tx_buffers(efx, n, xdpfs, flags & XDP_XMIT_FLUSH);
+}
+
+static void efx_update_name(struct efx_nic *efx)
+{
+	strcpy(efx->name, efx->net_dev->name);
+	efx_mtd_rename(efx);
+	efx_set_channel_names(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+			    unsigned long event, void *ptr)
+{
+	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
+
+	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
+	    event == NETDEV_CHANGENAME)
+		efx_update_name(netdev_priv(net_dev));
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+	.notifier_call = efx_netdev_event,
+};
+
+static ssize_t phy_type_show(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+	return sprintf(buf, "%d\n", efx->phy_type);
+}
+static DEVICE_ATTR_RO(phy_type);
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+	struct net_device *net_dev = efx->net_dev;
+	struct efx_channel *channel;
+	int rc;
+
+	net_dev->watchdog_timeo = 5 * HZ;
+	net_dev->irq = efx->pci_dev->irq;
+	net_dev->netdev_ops = &efx_netdev_ops;
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+		net_dev->priv_flags |= IFF_UNICAST_FLT;
+	net_dev->ethtool_ops = &efx_ethtool_ops;
+	netif_set_gso_max_segs(net_dev, EFX_TSO_MAX_SEGS);
+	net_dev->min_mtu = EFX_MIN_MTU;
+	net_dev->max_mtu = EFX_MAX_MTU;
+
+	rtnl_lock();
+
+	/* Enable resets to be scheduled and check whether any were
+	 * already requested.  If so, the NIC is probably hosed so we
+	 * abort.
+	 */
+	efx->state = STATE_READY;
+	smp_mb(); /* ensure we change state before checking reset_pending */
+	if (efx->reset_pending) {
+		pci_err(efx->pci_dev, "aborting probe due to scheduled reset\n");
+		rc = -EIO;
+		goto fail_locked;
+	}
+
+	rc = dev_alloc_name(net_dev, net_dev->name);
+	if (rc < 0)
+		goto fail_locked;
+	efx_update_name(efx);
+
+	/* Always start with carrier off; PHY events will detect the link */
+	netif_carrier_off(net_dev);
+
+	rc = register_netdevice(net_dev);
+	if (rc)
+		goto fail_locked;
+
+	efx_for_each_channel(channel, efx) {
+		struct efx_tx_queue *tx_queue;
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_init_tx_queue_core_txq(tx_queue);
+	}
+
+	efx_associate(efx);
+
+	rtnl_unlock();
+
+	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+	if (rc) {
+		netif_err(efx, drv, efx->net_dev,
+			  "failed to init net dev attributes\n");
+		goto fail_registered;
+	}
+
+	efx_init_mcdi_logging(efx);
+
+	return 0;
+
+fail_registered:
+	rtnl_lock();
+	efx_dissociate(efx);
+	unregister_netdevice(net_dev);
+fail_locked:
+	efx->state = STATE_UNINIT;
+	rtnl_unlock();
+	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
+	return rc;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+	if (!efx->net_dev)
+		return;
+
+	BUG_ON(netdev_priv(efx->net_dev) != efx);
+
+	if (efx_dev_registered(efx)) {
+		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+		efx_fini_mcdi_logging(efx);
+		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+		unregister_netdev(efx->net_dev);
+	}
+}
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static const struct pci_device_id efx_pci_table[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),  /* SFC9140 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),  /* SFC9220 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),  /* SFC9220 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),  /* SFC9250 PF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),  /* SFC9250 VF */
+	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
+	{0}			/* end of list */
+};
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
+{
+	u64 n_rx_nodesc_trunc = 0;
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+	/* Flush reset_work. It can no longer be scheduled since we
+	 * are not READY.
+	 */
+	BUG_ON(efx->state == STATE_READY);
+	efx_flush_reset_workqueue(efx);
+
+	efx_disable_interrupts(efx);
+	efx_clear_interrupt_affinity(efx);
+	efx_nic_fini_interrupt(efx);
+	efx_fini_port(efx);
+	efx->type->fini(efx);
+	efx_fini_napi(efx);
+	efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal).  A PF can call
+ * this on its VFs to ensure they are unbound first.
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+	struct efx_nic *efx;
+
+	efx = pci_get_drvdata(pci_dev);
+	if (!efx)
+		return;
+
+	/* Mark the NIC as fini, then stop the interface */
+	rtnl_lock();
+	efx_dissociate(efx);
+	dev_close(efx->net_dev);
+	efx_disable_interrupts(efx);
+	efx->state = STATE_UNINIT;
+	rtnl_unlock();
+
+	if (efx->type->sriov_fini)
+		efx->type->sriov_fini(efx);
+
+	efx_unregister_netdev(efx);
+
+	efx_mtd_remove(efx);
+
+	efx_pci_remove_main(efx);
+
+	efx_fini_io(efx);
+	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
+
+	efx_fini_struct(efx);
+	free_netdev(efx->net_dev);
+
+	pci_disable_pcie_error_reporting(pci_dev);
+};
+
+/* NIC VPD information
+ * Called during probe to display the part number of the
+ * installed NIC.
+ */
+static void efx_probe_vpd_strings(struct efx_nic *efx)
+{
+	struct pci_dev *dev = efx->pci_dev;
+	unsigned int vpd_size, kw_len;
+	u8 *vpd_data;
+	int start;
+
+	vpd_data = pci_vpd_alloc(dev, &vpd_size);
+	if (IS_ERR(vpd_data)) {
+		pci_warn(dev, "Unable to read VPD\n");
+		return;
+	}
+
+	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+					     PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+	if (start < 0)
+		pci_err(dev, "Part number not found or incomplete\n");
+	else
+		pci_info(dev, "Part Number : %.*s\n", kw_len, vpd_data + start);
+
+	start = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+					     PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
+	if (start < 0)
+		pci_err(dev, "Serial number not found or incomplete\n");
+	else
+		efx->vpd_sn = kmemdup_nul(vpd_data + start, kw_len, GFP_KERNEL);
+
+	kfree(vpd_data);
+}
+
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+	int rc;
+
+	/* Do start-of-day initialisation */
+	rc = efx_probe_all(efx);
+	if (rc)
+		goto fail1;
+
+	efx_init_napi(efx);
+
+	down_write(&efx->filter_sem);
+	rc = efx->type->init(efx);
+	up_write(&efx->filter_sem);
+	if (rc) {
+		pci_err(efx->pci_dev, "failed to initialise NIC\n");
+		goto fail3;
+	}
+
+	rc = efx_init_port(efx);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to initialise port\n");
+		goto fail4;
+	}
+
+	rc = efx_nic_init_interrupt(efx);
+	if (rc)
+		goto fail5;
+
+	efx_set_interrupt_affinity(efx);
+	rc = efx_enable_interrupts(efx);
+	if (rc)
+		goto fail6;
+
+	return 0;
+
+ fail6:
+	efx_clear_interrupt_affinity(efx);
+	efx_nic_fini_interrupt(efx);
+ fail5:
+	efx_fini_port(efx);
+ fail4:
+	efx->type->fini(efx);
+ fail3:
+	efx_fini_napi(efx);
+	efx_remove_all(efx);
+ fail1:
+	return rc;
+}
+
+static int efx_pci_probe_post_io(struct efx_nic *efx)
+{
+	struct net_device *net_dev = efx->net_dev;
+	int rc = efx_pci_probe_main(efx);
+
+	if (rc)
+		return rc;
+
+	if (efx->type->sriov_init) {
+		rc = efx->type->sriov_init(efx);
+		if (rc)
+			pci_err(efx->pci_dev, "SR-IOV can't be enabled rc %d\n",
+				rc);
+	}
+
+	/* Determine netdevice features */
+	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
+			      NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
+	if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+		net_dev->features |= NETIF_F_TSO6;
+	/* Check whether device supports TSO */
+	if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
+		net_dev->features &= ~NETIF_F_ALL_TSO;
+	/* Mask for features that also apply to VLAN devices */
+	net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
+				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+				   NETIF_F_RXCSUM);
+
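+	/* hw_features is the set the user may toggle via ethtool; fixed
+	 * features are excluded here so that they cannot be switched off.
+	 */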
+	net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
+
+	/* Disable receiving frames with bad FCS, by default. */
+	net_dev->features &= ~NETIF_F_RXALL;
+
+	/* Disable VLAN filtering by default.  It may be enforced if
+	 * the feature is fixed (i.e. VLAN filters are required to
+	 * receive VLAN tagged packets due to vPort restrictions).
+	 */
+	net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+	net_dev->features |= efx->fixed_features;
+
+	rc = efx_register_netdev(efx);
+	if (!rc)
+		return 0;
+
+	efx_pci_remove_main(efx);
+	return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically).  It sets up PCI mappings, resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine.  It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int efx_pci_probe(struct pci_dev *pci_dev,
+			 const struct pci_device_id *entry)
+{
+	struct net_device *net_dev;
+	struct efx_nic *efx;
+	int rc;
+
+	/* Allocate and initialise a struct net_device and struct efx_nic */
+	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+				     EFX_MAX_RX_QUEUES);
+	if (!net_dev)
+		return -ENOMEM;
+	efx = netdev_priv(net_dev);
+	efx->type = (const struct efx_nic_type *) entry->driver_data;
+	efx->fixed_features |= NETIF_F_HIGHDMA;
+
+	pci_set_drvdata(pci_dev, efx);
+	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
+	rc = efx_init_struct(efx, pci_dev, net_dev);
+	if (rc)
+		goto fail1;
+
+	pci_info(pci_dev, "Solarflare NIC detected\n");
+
+	if (!efx->type->is_vf)
+		efx_probe_vpd_strings(efx);
+
+	/* Set up basic I/O (BAR mappings etc) */
+	rc = efx_init_io(efx, efx->type->mem_bar(efx), efx->type->max_dma_mask,
+			 efx->type->mem_map_size(efx));
+	if (rc)
+		goto fail2;
+
+	rc = efx_pci_probe_post_io(efx);
+	if (rc) {
+		/* On failure, retry once immediately.
+		 * If we aborted probe due to a scheduled reset, dismiss it.
+		 */
+		efx->reset_pending = 0;
+		rc = efx_pci_probe_post_io(efx);
+		if (rc) {
+			/* On another failure, retry once more
+			 * after a 50-305ms delay.
+			 */
+			unsigned char r;
+
+			get_random_bytes(&r, 1);
+			msleep((unsigned int)r + 50);
+			efx->reset_pending = 0;
+			rc = efx_pci_probe_post_io(efx);
+		}
+	}
+	if (rc)
+		goto fail3;
+
+	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
+
+	/* Try to create MTDs, but allow this to fail */
+	rtnl_lock();
+	rc = efx_mtd_probe(efx);
+	rtnl_unlock();
+	if (rc && rc != -EPERM)
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to create MTDs (%d)\n", rc);
+
+	(void)pci_enable_pcie_error_reporting(pci_dev);
+
+	if (efx->type->udp_tnl_push_ports)
+		efx->type->udp_tnl_push_ports(efx);
+
+	return 0;
+
+ fail3:
+	efx_fini_io(efx);
+ fail2:
+	efx_fini_struct(efx);
+ fail1:
+	WARN_ON(rc > 0);
+	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
+	free_netdev(net_dev);
+	return rc;
+}
+
+/* efx_pci_sriov_configure returns the actual number of Virtual Functions
+ * enabled on success
+ */
+#ifdef CONFIG_SFC_SRIOV
+static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	int rc;
+	struct efx_nic *efx = pci_get_drvdata(dev);
+
+	if (efx->type->sriov_configure) {
+		rc = efx->type->sriov_configure(efx, num_vfs);
+		if (rc)
+			return rc;
+		else
+			return num_vfs;
+	} else
+		return -EOPNOTSUPP;
+}
+#endif
+
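+/* Power management callbacks.  freeze/thaw bracket hibernation image
+ * creation and only quiesce the datapath, while suspend/poweroff/resume
+ * handle full system sleep: the device is put into D3hot and brought back
+ * with a RESET_TYPE_ALL reset on resume.
+ */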
+static int efx_pm_freeze(struct device *dev)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+
+	rtnl_lock();
+
+	if (efx->state != STATE_DISABLED) {
+		efx->state = STATE_UNINIT;
+
+		efx_device_detach_sync(efx);
+
+		efx_stop_all(efx);
+		efx_disable_interrupts(efx);
+	}
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int efx_pm_thaw(struct device *dev)
+{
+	int rc;
+	struct efx_nic *efx = dev_get_drvdata(dev);
+
+	rtnl_lock();
+
+	if (efx->state != STATE_DISABLED) {
+		rc = efx_enable_interrupts(efx);
+		if (rc)
+			goto fail;
+
+		mutex_lock(&efx->mac_lock);
+		efx_mcdi_port_reconfigure(efx);
+		mutex_unlock(&efx->mac_lock);
+
+		efx_start_all(efx);
+
+		efx_device_attach_if_not_resetting(efx);
+
+		efx->state = STATE_READY;
+
+		efx->type->resume_wol(efx);
+	}
+
+	rtnl_unlock();
+
+	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
+	efx_queue_reset_work(efx);
+
+	return 0;
+
+fail:
+	rtnl_unlock();
+
+	return rc;
+}
+
+static int efx_pm_poweroff(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct efx_nic *efx = pci_get_drvdata(pci_dev);
+
+	efx->type->fini(efx);
+
+	efx->reset_pending = 0;
+
+	pci_save_state(pci_dev);
+	return pci_set_power_state(pci_dev, PCI_D3hot);
+}
+
+/* Used for both resume and restore */
+static int efx_pm_resume(struct device *dev)
+{
+	struct pci_dev *pci_dev = to_pci_dev(dev);
+	struct efx_nic *efx = pci_get_drvdata(pci_dev);
+	int rc;
+
+	rc = pci_set_power_state(pci_dev, PCI_D0);
+	if (rc)
+		return rc;
+	pci_restore_state(pci_dev);
+	rc = pci_enable_device(pci_dev);
+	if (rc)
+		return rc;
+	pci_set_master(efx->pci_dev);
+	rc = efx->type->reset(efx, RESET_TYPE_ALL);
+	if (rc)
+		return rc;
+	down_write(&efx->filter_sem);
+	rc = efx->type->init(efx);
+	up_write(&efx->filter_sem);
+	if (rc)
+		return rc;
+	rc = efx_pm_thaw(dev);
+	return rc;
+}
+
+static int efx_pm_suspend(struct device *dev)
+{
+	int rc;
+
+	efx_pm_freeze(dev);
+	rc = efx_pm_poweroff(dev);
+	if (rc)
+		efx_pm_resume(dev);
+	return rc;
+}
+
+static const struct dev_pm_ops efx_pm_ops = {
+	.suspend	= efx_pm_suspend,
+	.resume		= efx_pm_resume,
+	.freeze		= efx_pm_freeze,
+	.thaw		= efx_pm_thaw,
+	.poweroff	= efx_pm_poweroff,
+	.restore	= efx_pm_resume,
+};
+
+static struct pci_driver efx_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= efx_pci_table,
+	.probe		= efx_pci_probe,
+	.remove		= efx_pci_remove,
+	.driver.pm	= &efx_pm_ops,
+	.err_handler	= &efx_err_handlers,
+#ifdef CONFIG_SFC_SRIOV
+	.sriov_configure = efx_pci_sriov_configure,
+#endif
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+static int __init efx_init_module(void)
+{
+	int rc;
+
+	printk(KERN_INFO "Solarflare NET driver\n");
+
+	rc = register_netdevice_notifier(&efx_netdev_notifier);
+	if (rc)
+		goto err_notifier;
+
+	rc = efx_create_reset_workqueue();
+	if (rc)
+		goto err_reset;
+
+	rc = pci_register_driver(&efx_pci_driver);
+	if (rc < 0)
+		goto err_pci;
+
+	rc = pci_register_driver(&ef100_pci_driver);
+	if (rc < 0)
+		goto err_pci_ef100;
+
+	return 0;
+
+ err_pci_ef100:
+	pci_unregister_driver(&efx_pci_driver);
+ err_pci:
+	efx_destroy_reset_workqueue();
+ err_reset:
+	unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+	return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+	printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+	pci_unregister_driver(&ef100_pci_driver);
+	pci_unregister_driver(&efx_pci_driver);
+	efx_destroy_reset_workqueue();
+	unregister_netdevice_notifier(&efx_netdev_notifier);
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Solarflare Communications and "
+	      "Michael Brown <mbrown@...systems.co.uk>");
+MODULE_DESCRIPTION("Solarflare network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/ethernet/sfc/siena/efx.h b/drivers/net/ethernet/sfc/siena/efx.h
new file mode 100644
index 000000000000..c05a83da9e44
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx.h
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_EFX_H
+#define EFX_EFX_H
+
+#include <linux/indirect_call_wrapper.h>
+#include "net_driver.h"
+#include "ef100_rx.h"
+#include "ef100_tx.h"
+#include "filter.h"
+
+int efx_net_open(struct net_device *net_dev);
+int efx_net_stop(struct net_device *net_dev);
+
+/* TX */
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+				struct net_device *net_dev);
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
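+/* INDIRECT_CALL_2() compares the method pointer against the two expected
+ * implementations and calls the match directly, avoiding a retpoline-slowed
+ * indirect branch on this hot path.
+ */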
+static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	return INDIRECT_CALL_2(tx_queue->efx->type->tx_enqueue,
+			       ef100_enqueue_skb, __efx_enqueue_skb,
+			       tx_queue, skb);
+}
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
+int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+		 void *type_data);
+extern unsigned int efx_piobuf_size;
+
+/* RX */
+void __efx_rx_packet(struct efx_channel *channel);
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+		   unsigned int n_frags, unsigned int len, u16 flags);
+static inline void efx_rx_flush_packet(struct efx_channel *channel)
+{
+	if (channel->rx_pkt_n_frags)
+		INDIRECT_CALL_2(channel->efx->type->rx_packet,
+				__ef100_rx_packet, __efx_rx_packet,
+				channel);
+}
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+	if (efx->type->rx_buf_hash_valid)
+		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+				       ef100_rx_buf_hash_valid,
+				       prefix);
+	return true;
+}
+
+/* Maximum number of TCP segments we support for soft-TSO */
+#define EFX_TSO_MAX_SEGS	100
+
+/* The smallest [rt]xq_entries that the driver supports.  RX minimum
+ * is a bit arbitrary.  For TX, we must have space for at least 2
+ * TSO skbs.
+ */
+#define EFX_RXQ_MIN_ENT		128U
+#define EFX_TXQ_MIN_ENT(efx)	(2 * efx_tx_max_skb_descs(efx))
+
+/* All EF10 architecture NICs steal one bit of the DMAQ size for various
+ * other purposes when counting TxQ entries, so we halve the queue size.
+ */
+#define EFX_TXQ_MAX_ENT(efx)	(EFX_WORKAROUND_EF10(efx) ? \
+				 EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+
+static inline bool efx_rss_enabled(struct efx_nic *efx)
+{
+	return efx->rss_spread > 1;
+}
+
+/* Filters */
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ *	existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ *    is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ *    support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+					   struct efx_filter_spec *spec,
+					   bool replace_equal)
+{
+	return efx->type->filter_insert(efx, spec, replace_equal);
+}
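+/* Usage sketch (illustrative only; rxq_index, ip and port are placeholders):
+ *
+ *	struct efx_filter_spec spec;
+ *	s32 rc;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq_index);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip, port);
+ *	rc = efx_filter_insert_filter(efx, &spec, false);
+ *
+ * On success @rc is the filter ID; on failure it is a negative error as
+ * described above.
+ */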
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+					    enum efx_filter_priority priority,
+					    u32 filter_id)
+{
+	return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+			   enum efx_filter_priority priority,
+			   u32 filter_id, struct efx_filter_spec *spec)
+{
+	return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+					   enum efx_filter_priority priority)
+{
+	return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+	return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+					enum efx_filter_priority priority,
+					u32 *buf, u32 size)
+{
+	return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
+
+/* RSS contexts */
+static inline bool efx_rss_active(struct efx_rss_context *ctx)
+{
+	return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
+}
+
+/* Ethtool support */
+extern const struct ethtool_ops efx_ethtool_ops;
+
+/* Global */
+unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
+unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks);
+int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
+			    unsigned int rx_usecs, bool rx_adaptive,
+			    bool rx_may_override_tx);
+void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
+			    unsigned int *rx_usecs, bool *rx_adaptive);
+
+/* Update the generic software stats in the passed stats array */
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
+
+/* MTD */
+#ifdef CONFIG_SFC_MTD
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+		size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+	return efx->type->mtd_probe(efx);
+}
+void efx_mtd_rename(struct efx_nic *efx);
+void efx_mtd_remove(struct efx_nic *efx);
+#else
+static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mtd_rename(struct efx_nic *efx) {}
+static inline void efx_mtd_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_SRIOV
+static inline unsigned int efx_vf_size(struct efx_nic *efx)
+{
+	return 1 << efx->vi_scale;
+}
+#endif
+
+static inline void efx_device_detach_sync(struct efx_nic *efx)
+{
+	struct net_device *dev = efx->net_dev;
+
+	/* Lock/freeze all TX queues so that we can be sure the
+	 * TX scheduler is stopped when we're done and before
+	 * netif_device_present() becomes false.
+	 */
+	netif_tx_lock_bh(dev);
+	netif_device_detach(dev);
+	netif_tx_unlock_bh(dev);
+}
+
+static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
+{
+	if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
+		netif_device_attach(efx->net_dev);
+}
+
+static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
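+	/* If the semaphore can be taken for reading, no writer holds it -
+	 * which is exactly the condition this helper asserts against.
+	 */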
+	if (WARN_ON(down_read_trylock(sem))) {
+		up_read(sem);
+		return false;
+	}
+	return true;
+}
+
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush);
+
+#endif /* EFX_EFX_H */
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.c b/drivers/net/ethernet/sfc/siena/efx_channels.c
new file mode 100644
index 000000000000..4ab2ff8e82d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.c
@@ -0,0 +1,1352 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/filter.h>
+#include "efx_channels.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "tx_common.h"
+#include "rx_common.h"
+#include "nic.h"
+#include "sriov.h"
+#include "workarounds.h"
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each core.
+ */
+unsigned int rss_cpus;
+
+static unsigned int irq_adapt_low_thresh = 8000;
+module_param(irq_adapt_low_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_low_thresh,
+		 "Threshold score for reducing IRQ moderation");
+
+static unsigned int irq_adapt_high_thresh = 16000;
+module_param(irq_adapt_high_thresh, uint, 0644);
+MODULE_PARM_DESC(irq_adapt_high_thresh,
+		 "Threshold score for increasing IRQ moderation");
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+static const struct efx_channel_type efx_default_channel_type;
+
+/*************
+ * INTERRUPTS
+ *************/
+
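+/* Count online CPUs, one per physical core (hyperthread siblings are
+ * skipped), optionally restricted to the NUMA node of the NIC's PCI bus.
+ */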
+static unsigned int count_online_cores(struct efx_nic *efx, bool local_node)
+{
+	cpumask_var_t filter_mask;
+	unsigned int count;
+	int cpu;
+
+	if (unlikely(!zalloc_cpumask_var(&filter_mask, GFP_KERNEL))) {
+		netif_warn(efx, probe, efx->net_dev,
+			   "RSS disabled due to allocation failure\n");
+		return 1;
+	}
+
+	cpumask_copy(filter_mask, cpu_online_mask);
+	if (local_node)
+		cpumask_and(filter_mask, filter_mask,
+			    cpumask_of_pcibus(efx->pci_dev->bus));
+
+	count = 0;
+	for_each_cpu(cpu, filter_mask) {
+		++count;
+		cpumask_andnot(filter_mask, filter_mask, topology_sibling_cpumask(cpu));
+	}
+
+	free_cpumask_var(filter_mask);
+
+	return count;
+}
+
+static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
+{
+	unsigned int count;
+
+	if (rss_cpus) {
+		count = rss_cpus;
+	} else {
+		count = count_online_cores(efx, true);
+
+		/* If there are no online CPUs in the local node, fall back to any online CPU */
+		if (count == 0)
+			count = count_online_cores(efx, false);
+	}
+
+	if (count > EFX_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EFX_MAX_RX_QUEUES);
+		count = EFX_MAX_RX_QUEUES;
+	}
+
+	/* If RSS is requested for the PF *and* VFs then we can't write RSS
+	 * table entries that are inaccessible to VFs
+	 */
+#ifdef CONFIG_SFC_SRIOV
+	if (efx->type->sriov_wanted) {
+		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
+		    count > efx_vf_size(efx)) {
+			netif_warn(efx, probe, efx->net_dev,
+				   "Reducing number of RSS channels from %u to %u for "
+				   "VF support. Increase vf-msix-limit to use more "
+				   "channels on the PF.\n",
+				   count, efx_vf_size(efx));
+			count = efx_vf_size(efx);
+		}
+	}
+#endif
+
+	return count;
+}
+
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+				      unsigned int max_channels,
+				      unsigned int extra_channels,
+				      unsigned int parallelism)
+{
+	unsigned int n_channels = parallelism;
+	int vec_count;
+	int tx_per_ev;
+	int n_xdp_tx;
+	int n_xdp_ev;
+
+	if (efx_separate_tx_channels)
+		n_channels *= 2;
+	n_channels += extra_channels;
+
+	/* To allow XDP transmit to happen from arbitrary NAPI contexts
+	 * we allocate a TX queue per CPU. We share event queues across
+	 * multiple tx queues, assuming tx and ev queues are both
+	 * maximum size.
+	 */
+	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
+	n_xdp_tx = num_possible_cpus();
+	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
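+	/* With maximum-size event and TX queues, tx_per_ev is how many XDP TX
+	 * queues one event queue can host, and n_xdp_ev is the number of extra
+	 * channels needed to give every possible CPU its own XDP TX queue.
+	 */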
+
+	vec_count = pci_msix_vec_count(efx->pci_dev);
+	if (vec_count < 0)
+		return vec_count;
+
+	max_channels = min_t(unsigned int, vec_count, max_channels);
+
+	/* Check resources.
+	 * We need a channel per event queue, plus a VI per tx queue.
+	 * This may be more pessimistic than it needs to be.
+	 */
+	if (n_channels >= max_channels) {
+		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+		netif_warn(efx, drv, efx->net_dev,
+			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+			   n_xdp_ev, n_channels, max_channels);
+		netif_warn(efx, drv, efx->net_dev,
+			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+	} else if (n_channels + n_xdp_tx > efx->max_vis) {
+		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_BORROWED;
+		netif_warn(efx, drv, efx->net_dev,
+			   "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
+			   n_xdp_tx, n_channels, efx->max_vis);
+		netif_warn(efx, drv, efx->net_dev,
+			   "XDP_TX and XDP_REDIRECT might decrease device's performance\n");
+	} else if (n_channels + n_xdp_ev > max_channels) {
+		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_SHARED;
+		netif_warn(efx, drv, efx->net_dev,
+			   "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+			   n_xdp_ev, n_channels, max_channels);
+
+		n_xdp_ev = max_channels - n_channels;
+		netif_warn(efx, drv, efx->net_dev,
+			   "XDP_TX and XDP_REDIRECT will work with reduced performance (%d cpus/tx_queue)\n",
+			   DIV_ROUND_UP(n_xdp_tx, tx_per_ev * n_xdp_ev));
+	} else {
+		efx->xdp_txq_queues_mode = EFX_XDP_TX_QUEUES_DEDICATED;
+	}
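+	/* DEDICATED: each possible CPU has its own XDP TX queue.  SHARED: XDP
+	 * TX queues exist but several CPUs share each one.  BORROWED: no
+	 * dedicated XDP queues; XDP transmits on the regular TX queues.
+	 */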
+
+	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_BORROWED) {
+		efx->n_xdp_channels = n_xdp_ev;
+		efx->xdp_tx_per_channel = tx_per_ev;
+		efx->xdp_tx_queue_count = n_xdp_tx;
+		n_channels += n_xdp_ev;
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Allocating %d TX and %d event queues for XDP\n",
+			  n_xdp_ev * tx_per_ev, n_xdp_ev);
+	} else {
+		efx->n_xdp_channels = 0;
+		efx->xdp_tx_per_channel = 0;
+		efx->xdp_tx_queue_count = n_xdp_tx;
+	}
+
+	if (vec_count < n_channels) {
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+			  vec_count, n_channels);
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Performance may be reduced.\n");
+		n_channels = vec_count;
+	}
+
+	n_channels = min(n_channels, max_channels);
+
+	efx->n_channels = n_channels;
+
+	/* Ignore XDP tx channels when creating rx channels. */
+	n_channels -= efx->n_xdp_channels;
+
+	if (efx_separate_tx_channels) {
+		efx->n_tx_channels =
+			min(max(n_channels / 2, 1U),
+			    efx->max_tx_channels);
+		efx->tx_channel_offset =
+			n_channels - efx->n_tx_channels;
+		efx->n_rx_channels =
+			max(n_channels -
+			    efx->n_tx_channels, 1U);
+	} else {
+		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+		efx->tx_channel_offset = 0;
+		efx->n_rx_channels = n_channels;
+	}
+
+	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
+	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);
+
+	efx->xdp_channel_offset = n_channels;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Allocating %u RX channels\n",
+		  efx->n_rx_channels);
+
+	return efx->n_channels;
+}
+
+/* Probe the number and type of interrupts we are able to obtain, and
+ * the resulting numbers of channels and RX queues.
+ */
+int efx_probe_interrupts(struct efx_nic *efx)
+{
+	unsigned int extra_channels = 0;
+	unsigned int rss_spread;
+	unsigned int i, j;
+	int rc;
+
+	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
+		if (efx->extra_channel_type[i])
+			++extra_channels;
+
+	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+		unsigned int parallelism = efx_wanted_parallelism(efx);
+		struct msix_entry xentries[EFX_MAX_CHANNELS];
+		unsigned int n_channels;
+
+		rc = efx_allocate_msix_channels(efx, efx->max_channels,
+						extra_channels, parallelism);
+		if (rc >= 0) {
+			n_channels = rc;
+			for (i = 0; i < n_channels; i++)
+				xentries[i].entry = i;
+			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+						   n_channels);
+		}
+		if (rc < 0) {
+			/* Fall back to single channel MSI */
+			netif_err(efx, drv, efx->net_dev,
+				  "could not enable MSI-X\n");
+			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
+				efx->interrupt_mode = EFX_INT_MODE_MSI;
+			else
+				return rc;
+		} else if (rc < n_channels) {
+			netif_err(efx, drv, efx->net_dev,
+				  "WARNING: Insufficient MSI-X vectors"
+				  " available (%d < %u).\n", rc, n_channels);
+			netif_err(efx, drv, efx->net_dev,
+				  "WARNING: Performance may be reduced.\n");
+			n_channels = rc;
+		}
+
+		if (rc > 0) {
+			for (i = 0; i < efx->n_channels; i++)
+				efx_get_channel(efx, i)->irq =
+					xentries[i].vector;
+		}
+	}
+
+	/* Try single interrupt MSI */
+	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+		efx->n_channels = 1;
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
+		rc = pci_enable_msi(efx->pci_dev);
+		if (rc == 0) {
+			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
+		} else {
+			netif_err(efx, drv, efx->net_dev,
+				  "could not enable MSI\n");
+			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
+				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+			else
+				return rc;
+		}
+	}
+
+	/* Assume legacy interrupts */
+	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
+		efx->n_rx_channels = 1;
+		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
+		efx->legacy_irq = efx->pci_dev->irq;
+	}
+
+	/* Assign extra channels if possible, before XDP channels */
+	efx->n_extra_tx_channels = 0;
+	j = efx->xdp_channel_offset;
+	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
+		if (!efx->extra_channel_type[i])
+			continue;
+		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
+			efx->extra_channel_type[i]->handle_no_channel(efx);
+		} else {
+			--j;
+			efx_get_channel(efx, j)->type =
+				efx->extra_channel_type[i];
+			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
+				efx->n_extra_tx_channels++;
+		}
+	}
+
+	rss_spread = efx->n_rx_channels;
+	/* RSS might be usable on VFs even if it is disabled on the PF */
+#ifdef CONFIG_SFC_SRIOV
+	if (efx->type->sriov_wanted) {
+		efx->rss_spread = ((rss_spread > 1 ||
+				    !efx->type->sriov_wanted(efx)) ?
+				   rss_spread : efx_vf_size(efx));
+		return 0;
+	}
+#endif
+	efx->rss_spread = rss_spread;
+
+	return 0;
+}
+
+#if defined(CONFIG_SMP)
+void efx_set_interrupt_affinity(struct efx_nic *efx)
+{
+	const struct cpumask *numa_mask = cpumask_of_pcibus(efx->pci_dev->bus);
+	struct efx_channel *channel;
+	unsigned int cpu;
+
+	/* If there are no online CPUs in the local node, fall back to any online CPU */
+	if (cpumask_first_and(cpu_online_mask, numa_mask) >= nr_cpu_ids)
+		numa_mask = cpu_online_mask;
+
+	cpu = -1;
+	efx_for_each_channel(channel, efx) {
+		cpu = cpumask_next_and(cpu, cpu_online_mask, numa_mask);
+		if (cpu >= nr_cpu_ids)
+			cpu = cpumask_first_and(cpu_online_mask, numa_mask);
+		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
+	}
+}
+
+void efx_clear_interrupt_affinity(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		irq_set_affinity_hint(channel->irq, NULL);
+}
+#else
+void
+efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+
+void
+efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
+{
+}
+#endif /* CONFIG_SMP */
+
+void efx_remove_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	/* Remove MSI/MSI-X interrupts */
+	efx_for_each_channel(channel, efx)
+		channel->irq = 0;
+	pci_disable_msi(efx->pci_dev);
+	pci_disable_msix(efx->pci_dev);
+
+	/* Remove legacy interrupt */
+	efx->legacy_irq = 0;
+}
+
+/***************
+ * EVENT QUEUES
+ ***************/
+
+/* Create event queue
+ * Event queue memory allocations are done only once.  If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+int efx_probe_eventq(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned long entries;
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "chan %d create event queue\n", channel->channel);
+
+	/* Build an event queue with room for one event per tx and rx buffer,
+	 * plus some extra for link state events and MCDI completions.
+	 */
+	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
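+	/* (For example, 1024-entry RX and TX queues - a typical default -
+	 * give 2176, rounded up to a 4096-entry event queue.)
+	 */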
+	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
+	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
+
+	return efx_nic_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+int efx_init_eventq(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	int rc;
+
+	EFX_WARN_ON_PARANOID(channel->eventq_init);
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "chan %d init event queue\n", channel->channel);
+
+	rc = efx_nic_init_eventq(channel);
+	if (rc == 0) {
+		efx->type->push_irq_moderation(channel);
+		channel->eventq_read_ptr = 0;
+		channel->eventq_init = true;
+	}
+	return rc;
+}
+
+/* Enable event queue processing and NAPI */
+void efx_start_eventq(struct efx_channel *channel)
+{
+	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
+		  "chan %d start event queue\n", channel->channel);
+
+	/* Make sure the NAPI handler sees the enabled flag set */
+	channel->enabled = true;
+	smp_wmb();
+
+	napi_enable(&channel->napi_str);
+	efx_nic_eventq_read_ack(channel);
+}
+
+/* Disable event queue processing and NAPI */
+void efx_stop_eventq(struct efx_channel *channel)
+{
+	if (!channel->enabled)
+		return;
+
+	napi_disable(&channel->napi_str);
+	channel->enabled = false;
+}
+
+void efx_fini_eventq(struct efx_channel *channel)
+{
+	if (!channel->eventq_init)
+		return;
+
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "chan %d fini event queue\n", channel->channel);
+
+	efx_nic_fini_eventq(channel);
+	channel->eventq_init = false;
+}
+
+void efx_remove_eventq(struct efx_channel *channel)
+{
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "chan %d remove event queue\n", channel->channel);
+
+	efx_nic_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_RFS_ACCEL
+static void efx_filter_rfs_expire(struct work_struct *data)
+{
+	struct delayed_work *dwork = to_delayed_work(data);
+	struct efx_channel *channel;
+	unsigned int time, quota;
+
+	channel = container_of(dwork, struct efx_channel, filter_work);
+	time = jiffies - channel->rfs_last_expiry;
+	quota = channel->rfs_filter_count * time / (30 * HZ);
+	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
+		channel->rfs_last_expiry += time;
+	/* Ensure we do more work eventually even if NAPI poll is not happening */
+	schedule_delayed_work(dwork, 30 * HZ);
+}
+#endif
+
+/* Allocate and initialise a channel structure. */
+static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	int j;
+
+	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
+
+	channel->efx = efx;
+	channel->channel = i;
+	channel->type = &efx_default_channel_type;
+
+	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
+		tx_queue = &channel->tx_queue[j];
+		tx_queue->efx = efx;
+		tx_queue->queue = -1;
+		tx_queue->label = j;
+		tx_queue->channel = channel;
+	}
+
+#ifdef CONFIG_RFS_ACCEL
+	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+	rx_queue = &channel->rx_queue;
+	rx_queue->efx = efx;
+	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+
+	return channel;
+}
+
+int efx_init_channels(struct efx_nic *efx)
+{
+	unsigned int i;
+
+	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+		efx->channel[i] = efx_alloc_channel(efx, i);
+		if (!efx->channel[i])
+			return -ENOMEM;
+		efx->msi_context[i].efx = efx;
+		efx->msi_context[i].index = i;
+	}
+
+	/* Higher numbered interrupt modes are less capable! */
+	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
+				  efx_interrupt_mode);
+
+	efx->max_channels = EFX_MAX_CHANNELS;
+	efx->max_tx_channels = EFX_MAX_CHANNELS;
+
+	return 0;
+}
+
+void efx_fini_channels(struct efx_nic *efx)
+{
+	unsigned int i;
+
+	for (i = 0; i < EFX_MAX_CHANNELS; i++)
+		if (efx->channel[i]) {
+			kfree(efx->channel[i]);
+			efx->channel[i] = NULL;
+		}
+}
+
+/* Allocate and initialise a channel structure, copying parameters
+ * (but not resources) from an old channel structure.
+ */
+static struct efx_channel *
+efx_copy_channel(const struct efx_channel *old_channel)
+{
+	struct efx_rx_queue *rx_queue;
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	int j;
+
+	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
+	if (!channel)
+		return NULL;
+
+	*channel = *old_channel;
+
+	channel->napi_dev = NULL;
+	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
+	channel->napi_str.napi_id = 0;
+	channel->napi_str.state = 0;
+	memset(&channel->eventq, 0, sizeof(channel->eventq));
+
+	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
+		tx_queue = &channel->tx_queue[j];
+		if (tx_queue->channel)
+			tx_queue->channel = channel;
+		tx_queue->buffer = NULL;
+		tx_queue->cb_page = NULL;
+		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
+	}
+
+	rx_queue = &channel->rx_queue;
+	rx_queue->buffer = NULL;
+	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
+	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
+#ifdef CONFIG_RFS_ACCEL
+	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
+#endif
+
+	return channel;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	int rc;
+
+	netif_dbg(channel->efx, probe, channel->efx->net_dev,
+		  "creating channel %d\n", channel->channel);
+
+	rc = channel->type->pre_probe(channel);
+	if (rc)
+		goto fail;
+
+	rc = efx_probe_eventq(channel);
+	if (rc)
+		goto fail;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
+		rc = efx_probe_tx_queue(tx_queue);
+		if (rc)
+			goto fail;
+	}
+
+	efx_for_each_channel_rx_queue(rx_queue, channel) {
+		rc = efx_probe_rx_queue(rx_queue);
+		if (rc)
+			goto fail;
+	}
+
+	channel->rx_list = NULL;
+
+	return 0;
+
+fail:
+	efx_remove_channel(channel);
+	return rc;
+}
+
+static void efx_get_channel_name(struct efx_channel *channel, char *buf,
+				 size_t len)
+{
+	struct efx_nic *efx = channel->efx;
+	const char *type;
+	int number;
+
+	number = channel->channel;
+
+	if (number >= efx->xdp_channel_offset &&
+	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+		type = "-xdp";
+		number -= efx->xdp_channel_offset;
+	} else if (efx->tx_channel_offset == 0) {
+		type = "";
+	} else if (number < efx->tx_channel_offset) {
+		type = "-rx";
+	} else {
+		type = "-tx";
+		number -= efx->tx_channel_offset;
+	}
+	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
+}
+
+void efx_set_channel_names(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		channel->type->get_name(channel,
+					efx->msi_context[channel->channel].name,
+					sizeof(efx->msi_context[0].name));
+}
+
+int efx_probe_channels(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	int rc;
+
+	/* Restart special buffer allocation */
+	efx->next_buffer_table = 0;
+
+	/* Probe channels in reverse, so that any 'extra' channels
+	 * use the start of the buffer table. This allows the traffic
+	 * channels to be resized without moving them or wasting the
+	 * entries before them.
+	 */
+	efx_for_each_channel_rev(channel, efx) {
+		rc = efx_probe_channel(channel);
+		if (rc) {
+			netif_err(efx, probe, efx->net_dev,
+				  "failed to create channel %d\n",
+				  channel->channel);
+			goto fail;
+		}
+	}
+	efx_set_channel_names(efx);
+
+	return 0;
+
+fail:
+	efx_remove_channels(efx);
+	return rc;
+}
+
+void efx_remove_channel(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+
+	netif_dbg(channel->efx, drv, channel->efx->net_dev,
+		  "destroy chan %d\n", channel->channel);
+
+	efx_for_each_channel_rx_queue(rx_queue, channel)
+		efx_remove_rx_queue(rx_queue);
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_remove_tx_queue(tx_queue);
+	efx_remove_eventq(channel);
+	channel->type->post_remove(channel);
+}
+
+void efx_remove_channels(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_remove_channel(channel);
+
+	kfree(efx->xdp_tx_queues);
+}
+
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
+{
+	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
+	unsigned int i, next_buffer_table = 0;
+	u32 old_rxq_entries, old_txq_entries;
+	int rc, rc2;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
+
+	/* Not all channels should be reallocated. We must avoid
+	 * reallocating their buffer table entries.
+	 */
+	efx_for_each_channel(channel, efx) {
+		struct efx_rx_queue *rx_queue;
+		struct efx_tx_queue *tx_queue;
+
+		if (channel->type->copy)
+			continue;
+		next_buffer_table = max(next_buffer_table,
+					channel->eventq.index +
+					channel->eventq.entries);
+		efx_for_each_channel_rx_queue(rx_queue, channel)
+			next_buffer_table = max(next_buffer_table,
+						rx_queue->rxd.index +
+						rx_queue->rxd.entries);
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			next_buffer_table = max(next_buffer_table,
+						tx_queue->txd.index +
+						tx_queue->txd.entries);
+	}
+
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+	efx_soft_disable_interrupts(efx);
+
+	/* Clone channels (where possible) */
+	memset(other_channel, 0, sizeof(other_channel));
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = efx->channel[i];
+		if (channel->type->copy)
+			channel = channel->type->copy(channel);
+		if (!channel) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		other_channel[i] = channel;
+	}
+
+	/* Swap entry counts and channel pointers */
+	old_rxq_entries = efx->rxq_entries;
+	old_txq_entries = efx->txq_entries;
+	efx->rxq_entries = rxq_entries;
+	efx->txq_entries = txq_entries;
+	for (i = 0; i < efx->n_channels; i++)
+		swap(efx->channel[i], other_channel[i]);
+
+	/* Restart buffer table allocation */
+	efx->next_buffer_table = next_buffer_table;
+
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = efx->channel[i];
+		if (!channel->type->copy)
+			continue;
+		rc = efx_probe_channel(channel);
+		if (rc)
+			goto rollback;
+		efx_init_napi_channel(efx->channel[i]);
+	}
+
+out:
+	/* Destroy unused channel structures */
+	for (i = 0; i < efx->n_channels; i++) {
+		channel = other_channel[i];
+		if (channel && channel->type->copy) {
+			efx_fini_napi_channel(channel);
+			efx_remove_channel(channel);
+			kfree(channel);
+		}
+	}
+
+	rc2 = efx_soft_enable_interrupts(efx);
+	if (rc2) {
+		rc = rc ? rc : rc2;
+		netif_err(efx, drv, efx->net_dev,
+			  "unable to restart interrupts on channel reallocation\n");
+		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+	} else {
+		efx_start_all(efx);
+		efx_device_attach_if_not_resetting(efx);
+	}
+	return rc;
+
+rollback:
+	/* Swap back */
+	efx->rxq_entries = old_rxq_entries;
+	efx->txq_entries = old_txq_entries;
+	for (i = 0; i < efx->n_channels; i++)
+		swap(efx->channel[i], other_channel[i]);
+	goto out;
+}
+
+static inline int
+efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+		     struct efx_tx_queue *tx_queue)
+{
+	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+		return -EINVAL;
+
+	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+		  tx_queue->channel->channel, tx_queue->label,
+		  xdp_queue_number, tx_queue->queue);
+	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+	return 0;
+}
+
+int efx_set_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
+	int xdp_queue_number;
+	int rc;
+
+	efx->tx_channel_offset =
+		efx_separate_tx_channels ?
+		efx->n_channels - efx->n_tx_channels : 0;
+
+	if (efx->xdp_tx_queue_count) {
+		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+		/* Allocate array for XDP TX queue lookup. */
+		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+					     sizeof(*efx->xdp_tx_queues),
+					     GFP_KERNEL);
+		if (!efx->xdp_tx_queues)
+			return -ENOMEM;
+	}
+
+	/* We need to mark which channels really have RX and TX
+	 * queues, and adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	xdp_queue_number = 0;
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->n_rx_channels)
+			channel->rx_queue.core_index = channel->channel;
+		else
+			channel->rx_queue.core_index = -1;
+
+		if (channel->channel >= efx->tx_channel_offset) {
+			if (efx_channel_is_xdp_tx(channel)) {
+				efx_for_each_channel_tx_queue(tx_queue, channel) {
+					tx_queue->queue = next_queue++;
+					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+					if (rc == 0)
+						xdp_queue_number++;
+				}
+			} else {
+				efx_for_each_channel_tx_queue(tx_queue, channel) {
+					tx_queue->queue = next_queue++;
+					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
+						  channel->channel, tx_queue->label,
+						  tx_queue->queue);
+				}
+
+				/* If XDP is borrowing queues from net stack, it must use the queue
+				 * with no csum offload, which is the first one of the channel
+				 * (note: channel->tx_queue_by_type is not initialized yet)
+				 */
+				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+					tx_queue = &channel->tx_queue[0];
+					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+					if (rc == 0)
+						xdp_queue_number++;
+				}
+			}
+		}
+	}
+	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number != efx->xdp_tx_queue_count);
+	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If there are more CPUs than assigned XDP TX queues, map the
+	 * remaining CPUs onto the queues that already exist.
+	 */
+	next_queue = 0;
+	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[next_queue++];
+		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+		if (rc == 0)
+			xdp_queue_number++;
+	}
+
+	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
+	if (rc)
+		return rc;
+	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
+}
+
+static bool efx_default_channel_want_txqs(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->tx_channel_offset <
+		channel->efx->n_tx_channels;
+}
+
+/*************
+ * START/STOP
+ *************/
+
+int efx_soft_enable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel, *end_channel;
+	int rc;
+
+	BUG_ON(efx->state == STATE_DISABLED);
+
+	efx->irq_soft_enabled = true;
+	smp_wmb();
+
+	efx_for_each_channel(channel, efx) {
+		if (!channel->type->keep_eventq) {
+			rc = efx_init_eventq(channel);
+			if (rc)
+				goto fail;
+		}
+		efx_start_eventq(channel);
+	}
+
+	efx_mcdi_mode_event(efx);
+
+	return 0;
+fail:
+	end_channel = channel;
+	efx_for_each_channel(channel, efx) {
+		if (channel == end_channel)
+			break;
+		efx_stop_eventq(channel);
+		if (!channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	return rc;
+}
+
+void efx_soft_disable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	if (efx->state == STATE_DISABLED)
+		return;
+
+	efx_mcdi_mode_poll(efx);
+
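+	/* Clear the flag with a write barrier so that in-flight interrupt
+	 * handlers see irq_soft_enabled == false before we synchronise with
+	 * them and tear down the event queues.
+	 */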
+	efx->irq_soft_enabled = false;
+	smp_wmb();
+
+	if (efx->legacy_irq)
+		synchronize_irq(efx->legacy_irq);
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->irq)
+			synchronize_irq(channel->irq);
+
+		efx_stop_eventq(channel);
+		if (!channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	/* Flush the asynchronous MCDI request queue */
+	efx_mcdi_flush_async(efx);
+}
+
+int efx_enable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel, *end_channel;
+	int rc;
+
+	/* TODO: Is this really a bug? */
+	BUG_ON(efx->state == STATE_DISABLED);
+
+	if (efx->eeh_disabled_legacy_irq) {
+		enable_irq(efx->legacy_irq);
+		efx->eeh_disabled_legacy_irq = false;
+	}
+
+	efx->type->irq_enable_master(efx);
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->type->keep_eventq) {
+			rc = efx_init_eventq(channel);
+			if (rc)
+				goto fail;
+		}
+	}
+
+	rc = efx_soft_enable_interrupts(efx);
+	if (rc)
+		goto fail;
+
+	return 0;
+
+fail:
+	end_channel = channel;
+	efx_for_each_channel(channel, efx) {
+		if (channel == end_channel)
+			break;
+		if (channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	efx->type->irq_disable_non_ev(efx);
+
+	return rc;
+}
+
+void efx_disable_interrupts(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_soft_disable_interrupts(efx);
+
+	efx_for_each_channel(channel, efx) {
+		if (channel->type->keep_eventq)
+			efx_fini_eventq(channel);
+	}
+
+	efx->type->irq_disable_non_ev(efx);
+}
+
+void efx_start_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			efx_init_tx_queue(tx_queue);
+			atomic_inc(&efx->active_queues);
+		}
+
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			efx_init_rx_queue(rx_queue);
+			atomic_inc(&efx->active_queues);
+			efx_stop_eventq(channel);
+			efx_fast_push_rx_descriptors(rx_queue, false);
+			efx_start_eventq(channel);
+		}
+
+		WARN_ON(channel->rx_pkt_n_frags);
+	}
+}
+
+void efx_stop_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	struct efx_channel *channel;
+	int rc = 0;
+
+	/* Stop RX refill */
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_rx_queue(rx_queue, channel)
+			rx_queue->refill_enabled = false;
+	}
+
+	efx_for_each_channel(channel, efx) {
+		/* RX packet processing is pipelined, so wait for the
+		 * NAPI handler to complete.  At least event queue 0
+		 * might be kept active by non-data events, so don't
+		 * use napi_synchronize() but actually disable NAPI
+		 * temporarily.
+		 */
+		if (efx_channel_has_rx_queue(channel)) {
+			efx_stop_eventq(channel);
+			efx_start_eventq(channel);
+		}
+	}
+
+	if (efx->type->fini_dmaq)
+		rc = efx->type->fini_dmaq(efx);
+
+	if (rc) {
+		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+	} else {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "successfully flushed all queues\n");
+	}
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_rx_queue(rx_queue, channel)
+			efx_fini_rx_queue(rx_queue);
+		efx_for_each_channel_tx_queue(tx_queue, channel)
+			efx_fini_tx_queue(tx_queue);
+	}
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel.  The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static int efx_process_channel(struct efx_channel *channel, int budget)
+{
+	struct efx_tx_queue *tx_queue;
+	struct list_head rx_list;
+	int spent;
+
+	if (unlikely(!channel->enabled))
+		return 0;
+
+	/* Prepare the batch receive list */
+	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
+	INIT_LIST_HEAD(&rx_list);
+	channel->rx_list = &rx_list;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
+		tx_queue->pkts_compl = 0;
+		tx_queue->bytes_compl = 0;
+	}
+
+	spent = efx_nic_process_eventq(channel, budget);
+	if (spent && efx_channel_has_rx_queue(channel)) {
+		struct efx_rx_queue *rx_queue =
+			efx_channel_get_rx_queue(channel);
+
+		efx_rx_flush_packet(channel);
+		efx_fast_push_rx_descriptors(rx_queue, true);
+	}
+
+	/* Update BQL */
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
+		if (tx_queue->bytes_compl) {
+			netdev_tx_completed_queue(tx_queue->core_txq,
+						  tx_queue->pkts_compl,
+						  tx_queue->bytes_compl);
+		}
+	}
+
+	/* Receive any packets we queued up */
+	netif_receive_skb_list(channel->rx_list);
+	channel->rx_list = NULL;
+
+	return spent;
+}
+
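+/* Adaptive IRQ moderation: if the channel handled few events per interrupt,
+ * shorten the moderation interval; if it handled many, lengthen it, capped
+ * at the configured RX moderation setting.
+ */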
+static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
+{
+	int step = efx->irq_mod_step_us;
+
+	if (channel->irq_mod_score < irq_adapt_low_thresh) {
+		if (channel->irq_moderation_us > step) {
+			channel->irq_moderation_us -= step;
+			efx->type->push_irq_moderation(channel);
+		}
+	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
+		if (channel->irq_moderation_us <
+		    efx->irq_rx_moderation_us) {
+			channel->irq_moderation_us += step;
+			efx->type->push_irq_moderation(channel);
+		}
+	}
+
+	channel->irq_count = 0;
+	channel->irq_mod_score = 0;
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+	struct efx_channel *channel =
+		container_of(napi, struct efx_channel, napi_str);
+	struct efx_nic *efx = channel->efx;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int time;
+#endif
+	int spent;
+
+	netif_vdbg(efx, intr, efx->net_dev,
+		   "channel %d NAPI poll executing on CPU %d\n",
+		   channel->channel, raw_smp_processor_id());
+
+	spent = efx_process_channel(channel, budget);
+
+	xdp_do_flush_map();
+
+	if (spent < budget) {
+		if (efx_channel_has_rx_queue(channel) &&
+		    efx->irq_rx_adaptive &&
+		    unlikely(++channel->irq_count == 1000)) {
+			efx_update_irq_mod(efx, channel);
+		}
+
+#ifdef CONFIG_RFS_ACCEL
+		/* Perhaps expire some ARFS filters */
+		time = jiffies - channel->rfs_last_expiry;
+		/* Would our quota be >= 20? */
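+		/* (600 * HZ == 20 quota units * 30 * HZ, the threshold that
+		 * efx_filter_rfs_expire() applies.)
+		 */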
+		if (channel->rfs_filter_count * time >= 600 * HZ)
+			mod_delayed_work(system_wq, &channel->filter_work, 0);
+#endif
+
+		/* There is no race here; although napi_disable() will
+		 * only wait for napi_complete(), this isn't a problem
+		 * since efx_nic_eventq_read_ack() will have no effect if
+		 * interrupts have already been disabled.
+		 */
+		if (napi_complete_done(napi, spent))
+			efx_nic_eventq_read_ack(channel);
+	}
+
+	return spent;
+}
+
+void efx_init_napi_channel(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+
+	channel->napi_dev = efx->net_dev;
+	netif_napi_add(channel->napi_dev, &channel->napi_str,
+		       efx_poll, napi_weight);
+}
+
+void efx_init_napi(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_init_napi_channel(channel);
+}
+
+void efx_fini_napi_channel(struct efx_channel *channel)
+{
+	if (channel->napi_dev)
+		netif_napi_del(&channel->napi_str);
+
+	channel->napi_dev = NULL;
+}
+
+void efx_fini_napi(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_fini_napi_channel(channel);
+}
+
+/***************
+ * Housekeeping
+ ***************/
+
+static int efx_channel_dummy_op_int(struct efx_channel *channel)
+{
+	return 0;
+}
+
+void efx_channel_dummy_op_void(struct efx_channel *channel)
+{
+}
+
+static const struct efx_channel_type efx_default_channel_type = {
+	.pre_probe		= efx_channel_dummy_op_int,
+	.post_remove		= efx_channel_dummy_op_void,
+	.get_name		= efx_get_channel_name,
+	.copy			= efx_copy_channel,
+	.want_txqs		= efx_default_channel_want_txqs,
+	.keep_eventq		= false,
+	.want_pio		= true,
+};
diff --git a/drivers/net/ethernet/sfc/siena/efx_channels.h b/drivers/net/ethernet/sfc/siena/efx_channels.h
new file mode 100644
index 000000000000..64abb99a56b8
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_channels.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_CHANNELS_H
+#define EFX_CHANNELS_H
+
+extern unsigned int efx_interrupt_mode;
+extern unsigned int rss_cpus;
+
+int efx_probe_interrupts(struct efx_nic *efx);
+void efx_remove_interrupts(struct efx_nic *efx);
+int efx_soft_enable_interrupts(struct efx_nic *efx);
+void efx_soft_disable_interrupts(struct efx_nic *efx);
+int efx_enable_interrupts(struct efx_nic *efx);
+void efx_disable_interrupts(struct efx_nic *efx);
+
+void efx_set_interrupt_affinity(struct efx_nic *efx);
+void efx_clear_interrupt_affinity(struct efx_nic *efx);
+
+int efx_probe_eventq(struct efx_channel *channel);
+int efx_init_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_fini_eventq(struct efx_channel *channel);
+void efx_remove_eventq(struct efx_channel *channel);
+
+int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
+void efx_set_channel_names(struct efx_nic *efx);
+int efx_init_channels(struct efx_nic *efx);
+int efx_probe_channels(struct efx_nic *efx);
+int efx_set_channels(struct efx_nic *efx);
+void efx_remove_channel(struct efx_channel *channel);
+void efx_remove_channels(struct efx_nic *efx);
+void efx_fini_channels(struct efx_nic *efx);
+void efx_start_channels(struct efx_nic *efx);
+void efx_stop_channels(struct efx_nic *efx);
+
+void efx_init_napi_channel(struct efx_channel *channel);
+void efx_init_napi(struct efx_nic *efx);
+void efx_fini_napi_channel(struct efx_channel *channel);
+void efx_fini_napi(struct efx_nic *efx);
+
+void efx_channel_dummy_op_void(struct efx_channel *channel);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
new file mode 100644
index 000000000000..f6577e74d6e6
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -0,0 +1,1396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/filter.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <net/gre.h>
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "io.h"
+#include "mcdi_pcol.h"
+
+static unsigned int debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+			     NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+			     NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
+			     NETIF_MSG_TX_ERR | NETIF_MSG_HW);
+module_param(debug, uint, 0);
+MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor.
+ * On Falcon-based NICs, this will:
+ * - Check the on-board hardware monitor;
+ * - Poll the link state and reconfigure the hardware as necessary.
+ * On Siena-based NICs for power systems with EEH support, this will give EEH a
+ * chance to start.
+ */
+static unsigned int efx_monitor_interval = 1 * HZ;
+
+/* How often and how many times to poll for a reset while waiting for a
+ * BIST that another function started to complete.
+ */
+#define BIST_WAIT_DELAY_MS	100
+#define BIST_WAIT_DELAY_COUNT	100
+
+/* Default stats update time */
+#define STATS_PERIOD_MS_DEFAULT 1000
+
+static const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
+static const char *const efx_reset_type_names[] = {
+	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
+	[RESET_TYPE_ALL]                = "ALL",
+	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
+	[RESET_TYPE_WORLD]              = "WORLD",
+	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
+	[RESET_TYPE_DATAPATH]           = "DATAPATH",
+	[RESET_TYPE_MC_BIST]		= "MC_BIST",
+	[RESET_TYPE_DISABLE]            = "DISABLE",
+	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
+	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
+	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
+	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
+	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
+	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
+};
+
+#define RESET_TYPE(type) \
+	STRING_TABLE_LOOKUP(type, efx_reset_type)
+
+/* Loopback mode names (see LOOPBACK_MODE()) */
+const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
+const char *const efx_loopback_mode_names[] = {
+	[LOOPBACK_NONE]		= "NONE",
+	[LOOPBACK_DATA]		= "DATAPATH",
+	[LOOPBACK_GMAC]		= "GMAC",
+	[LOOPBACK_XGMII]	= "XGMII",
+	[LOOPBACK_XGXS]		= "XGXS",
+	[LOOPBACK_XAUI]		= "XAUI",
+	[LOOPBACK_GMII]		= "GMII",
+	[LOOPBACK_SGMII]	= "SGMII",
+	[LOOPBACK_XGBR]		= "XGBR",
+	[LOOPBACK_XFI]		= "XFI",
+	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
+	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
+	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
+	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
+	[LOOPBACK_GPHY]		= "GPHY",
+	[LOOPBACK_PHYXS]	= "PHYXS",
+	[LOOPBACK_PCS]		= "PCS",
+	[LOOPBACK_PMAPMD]	= "PMA/PMD",
+	[LOOPBACK_XPORT]	= "XPORT",
+	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
+	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
+	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
+	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
+	[LOOPBACK_GMII_WS]	= "GMII_WS",
+	[LOOPBACK_XFI_WS]	= "XFI_WS",
+	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
+	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
+};
+
+/* Reset workqueue. If any NIC has a hardware failure then a reset will be
+ * queued onto this work queue. This is not a per-nic work queue, because
+ * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
+ */
+static struct workqueue_struct *reset_workqueue;
+
+int efx_create_reset_workqueue(void)
+{
+	reset_workqueue = create_singlethread_workqueue("sfc_reset");
+	if (!reset_workqueue) {
+		printk(KERN_ERR "Failed to create reset workqueue\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void efx_queue_reset_work(struct efx_nic *efx)
+{
+	queue_work(reset_workqueue, &efx->reset_work);
+}
+
+void efx_flush_reset_workqueue(struct efx_nic *efx)
+{
+	cancel_work_sync(&efx->reset_work);
+}
+
+void efx_destroy_reset_workqueue(void)
+{
+	if (reset_workqueue) {
+		destroy_workqueue(reset_workqueue);
+		reset_workqueue = NULL;
+	}
+}
+
+/* We assume that efx->type->reconfigure_mac will always try to sync RX
+ * filters and therefore needs to read-lock the filter table against freeing
+ */
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only)
+{
+	if (efx->type->reconfigure_mac) {
+		down_read(&efx->filter_sem);
+		efx->type->reconfigure_mac(efx, mtu_only);
+		up_read(&efx->filter_sem);
+	}
+}
+
+/* Asynchronous work item for changing MAC promiscuity and multicast
+ * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
+ * MAC directly.
+ */
+static void efx_mac_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
+
+	mutex_lock(&efx->mac_lock);
+	if (efx->port_enabled)
+		efx_mac_reconfigure(efx, false);
+	mutex_unlock(&efx->mac_lock);
+}
+
+int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct sockaddr *addr = data;
+	u8 *new_addr = addr->sa_data;
+	u8 old_addr[6];
+	int rc;
+
+	if (!is_valid_ether_addr(new_addr)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "invalid ethernet MAC address requested: %pM\n",
+			  new_addr);
+		return -EADDRNOTAVAIL;
+	}
+
+	/* save old address */
+	ether_addr_copy(old_addr, net_dev->dev_addr);
+	eth_hw_addr_set(net_dev, new_addr);
+	if (efx->type->set_mac_address) {
+		rc = efx->type->set_mac_address(efx);
+		if (rc) {
+			eth_hw_addr_set(net_dev, old_addr);
+			return rc;
+		}
+	}
+
+	/* Reconfigure the MAC */
+	mutex_lock(&efx->mac_lock);
+	efx_mac_reconfigure(efx, false);
+	mutex_unlock(&efx->mac_lock);
+
+	return 0;
+}
+
+/* Context: netif_addr_lock held, BHs disabled. */
+void efx_set_rx_mode(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->port_enabled)
+		queue_work(efx->workqueue, &efx->mac_work);
+	/* Otherwise efx_start_port() will do this */
+}
+
+int efx_set_features(struct net_device *net_dev, netdev_features_t data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	/* If disabling RX n-tuple filtering, clear existing filters */
+	if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+		rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+		if (rc)
+			return rc;
+	}
+
+	/* If Rx VLAN filter is changed, update filters via mac_reconfigure.
+	 * If rx-fcs is changed, mac_reconfigure updates that too.
+	 */
+	if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
+					  NETIF_F_RXFCS)) {
+		/* efx_set_rx_mode() will schedule MAC work to update filters
+		 * when the new features are finally set in net_dev.
+		 */
+		efx_set_rx_mode(net_dev);
+	}
+
+	return 0;
+}
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the port's
+ * TX queue stopped or started to match the link state.
+ */
+void efx_link_status_changed(struct efx_nic *efx)
+{
+	struct efx_link_state *link_state = &efx->link_state;
+
+	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+	 * that no events are triggered between unregister_netdev() and the
+	 * driver unloading. A more general condition is that NETDEV_CHANGE
+	 * can only be generated between NETDEV_UP and NETDEV_DOWN
+	 */
+	if (!netif_running(efx->net_dev))
+		return;
+
+	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
+		efx->n_link_state_changes++;
+
+		if (link_state->up)
+			netif_carrier_on(efx->net_dev);
+		else
+			netif_carrier_off(efx->net_dev);
+	}
+
+	/* Status message for kernel log */
+	if (link_state->up)
+		netif_info(efx, link, efx->net_dev,
+			   "link up at %uMbps %s-duplex (MTU %d)\n",
+			   link_state->speed, link_state->fd ? "full" : "half",
+			   efx->net_dev->mtu);
+	else
+		netif_info(efx, link, efx->net_dev, "link down\n");
+}
+
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx)
+{
+	/* The maximum MTU that we can fit in a single page, allowing for
+	 * framing, overhead and XDP headroom + tailroom.
+	 */
+	int overhead = EFX_MAX_FRAME_LEN(0) + sizeof(struct efx_rx_page_state) +
+		       efx->rx_prefix_size + efx->type->rx_buffer_padding +
+		       efx->rx_ip_align + EFX_XDP_HEADROOM + EFX_XDP_TAILROOM;
+
+	return PAGE_SIZE - overhead;
+}
+
+/* Context: process, rtnl_lock() held. */
+int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	rc = efx_check_disabled(efx);
+	if (rc)
+		return rc;
+
+	if (rtnl_dereference(efx->xdp_prog) &&
+	    new_mtu > efx_xdp_max_mtu(efx)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Requested MTU of %d too big for XDP (max: %d)\n",
+			  new_mtu, efx_xdp_max_mtu(efx));
+		return -EINVAL;
+	}
+
+	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
+	efx_device_detach_sync(efx);
+	efx_stop_all(efx);
+
+	mutex_lock(&efx->mac_lock);
+	net_dev->mtu = new_mtu;
+	efx_mac_reconfigure(efx, true);
+	mutex_unlock(&efx->mac_lock);
+
+	efx_start_all(efx);
+	efx_device_attach_if_not_resetting(efx);
+	return 0;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue */
+static void efx_monitor(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic,
+					   monitor_work.work);
+
+	netif_vdbg(efx, timer, efx->net_dev,
+		   "hardware monitor executing on CPU %d\n",
+		   raw_smp_processor_id());
+	BUG_ON(efx->type->monitor == NULL);
+
+	/* If the mac_lock is already held then it is likely a port
+	 * reconfiguration is already in place, which will likely do
+	 * most of the work of monitor() anyway.
+	 */
+	if (mutex_trylock(&efx->mac_lock)) {
+		if (efx->port_enabled && efx->type->monitor)
+			efx->type->monitor(efx);
+		mutex_unlock(&efx->mac_lock);
+	}
+
+	efx_start_monitor(efx);
+}
+
+void efx_start_monitor(struct efx_nic *efx)
+{
+	if (efx->type->monitor)
+		queue_delayed_work(efx->workqueue, &efx->monitor_work,
+				   efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Channels are shutdown and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static void efx_start_datapath(struct efx_nic *efx)
+{
+	netdev_features_t old_features = efx->net_dev->features;
+	bool old_rx_scatter = efx->rx_scatter;
+	size_t rx_buf_len;
+
+	/* Calculate the rx buffer allocation parameters required to
+	 * support the current MTU, including padding for header
+	 * alignment and overruns.
+	 */
+	efx->rx_dma_len = (efx->rx_prefix_size +
+			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+			   efx->type->rx_buffer_padding);
+	rx_buf_len = (sizeof(struct efx_rx_page_state)   + EFX_XDP_HEADROOM +
+		      efx->rx_ip_align + efx->rx_dma_len + EFX_XDP_TAILROOM);
+
+	if (rx_buf_len <= PAGE_SIZE) {
+		efx->rx_scatter = efx->type->always_rx_scatter;
+		efx->rx_buffer_order = 0;
+	} else if (efx->type->can_rx_scatter) {
+		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
+		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
+			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
+				       EFX_RX_BUF_ALIGNMENT) >
+			     PAGE_SIZE);
+		efx->rx_scatter = true;
+		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
+		efx->rx_buffer_order = 0;
+	} else {
+		efx->rx_scatter = false;
+		efx->rx_buffer_order = get_order(rx_buf_len);
+	}
+
+	efx_rx_config_page_split(efx);
+	if (efx->rx_buffer_order)
+		netif_dbg(efx, drv, efx->net_dev,
+			  "RX buf len=%u; page order=%u batch=%u\n",
+			  efx->rx_dma_len, efx->rx_buffer_order,
+			  efx->rx_pages_per_batch);
+	else
+		netif_dbg(efx, drv, efx->net_dev,
+			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
+			  efx->rx_dma_len, efx->rx_page_buf_step,
+			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+
+	/* Restore previously fixed features in hw_features and remove
+	 * features which are fixed now
+	 */
+	efx->net_dev->hw_features |= efx->net_dev->features;
+	efx->net_dev->hw_features &= ~efx->fixed_features;
+	efx->net_dev->features |= efx->fixed_features;
+	if (efx->net_dev->features != old_features)
+		netdev_features_change(efx->net_dev);
+
+	/* RX filters may also have scatter-enabled flags */
+	if ((efx->rx_scatter != old_rx_scatter) &&
+	    efx->type->filter_update_rx_scatter)
+		efx->type->filter_update_rx_scatter(efx);
+
+	/* We must keep at least one descriptor in a TX ring empty.
+	 * We could avoid this when the queue size does not exactly
+	 * match the hardware ring size, but it's not that important.
+	 * Therefore we stop the queue when one more skb might fill
+	 * the ring completely.  We wake it when half way back to
+	 * empty.
+	 */
+	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
+	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
+
+	/* Initialise the channels */
+	efx_start_channels(efx);
+
+	efx_ptp_start_datapath(efx);
+
+	if (netif_device_present(efx->net_dev))
+		netif_tx_wake_all_queues(efx->net_dev);
+}
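
The buffer-sizing logic in efx_start_datapath() above reduces to three outcomes: everything fits in one order-0 page, the NIC scatters the frame across fixed-size buffers, or the driver falls back to a higher-order allocation. The standalone sketch below is not part of the patch and uses made-up numbers for the prefix/padding/XDP overheads; only the arithmetic shape and the branch order mirror the code.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u		/* assume 4 KiB pages */

static const char *rx_buf_strategy(unsigned int mtu, unsigned int overhead,
				   int can_rx_scatter)
{
	/* "overhead" stands in for the RX prefix, buffer padding, IP
	 * alignment and XDP head/tailroom the real code sums from driver
	 * constants.
	 */
	unsigned int rx_dma_len = mtu + 18 /* L2 framing */ + overhead;
	unsigned int rx_buf_len = rx_dma_len + 64 /* page state, etc. */;

	if (rx_buf_len <= EXAMPLE_PAGE_SIZE)
		return "one buffer per packet, order-0 pages";
	if (can_rx_scatter)
		return "scatter across fixed-size buffers, order-0 pages";
	return "fall back to a higher-order page allocation";
}

int main(void)
{
	printf("MTU 1500: %s\n", rx_buf_strategy(1500, 128, 1));
	printf("MTU 9000: %s\n", rx_buf_strategy(9000, 128, 1));
	return 0;
}
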
+
+static void efx_stop_datapath(struct efx_nic *efx)
+{
+	EFX_ASSERT_RESET_SERIALISED(efx);
+	BUG_ON(efx->port_enabled);
+
+	efx_ptp_stop_datapath(efx);
+
+	efx_stop_channels(efx);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* Equivalent to efx_link_set_advertising with all-zeroes, except does not
+ * force the Autoneg bit on.
+ */
+void efx_link_clear_advertising(struct efx_nic *efx)
+{
+	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+}
+
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
+{
+	efx->wanted_fc = wanted_fc;
+	if (efx->link_advertising[0]) {
+		if (wanted_fc & EFX_FC_RX)
+			efx->link_advertising[0] |= (ADVERTISED_Pause |
+						     ADVERTISED_Asym_Pause);
+		else
+			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
+						      ADVERTISED_Asym_Pause);
+		if (wanted_fc & EFX_FC_TX)
+			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
+	}
+}
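
For reference (not part of the patch), the four wanted_fc combinations handled by efx_link_set_wanted_fc() above resolve to the standard IEEE 802.3 pause advertisement encoding. The sketch below restates the same logic with simplified flag values so the XOR trick is easier to see.

#include <stdio.h>

#define FC_RX		1u	/* stand-ins for EFX_FC_RX / EFX_FC_TX */
#define FC_TX		2u
#define ADV_PAUSE	1u	/* stand-ins for ADVERTISED_Pause / _Asym_Pause */
#define ADV_ASYM	2u

static unsigned int pause_advertising(unsigned int wanted_fc)
{
	unsigned int adv = 0;

	if (wanted_fc & FC_RX)
		adv |= ADV_PAUSE | ADV_ASYM;
	if (wanted_fc & FC_TX)
		adv ^= ADV_ASYM;	/* same XOR as the driver code above */
	return adv;
}

int main(void)
{
	/* none -> nothing, RX -> Pause|Asym, TX -> Asym, RX+TX -> Pause */
	for (unsigned int fc = 0; fc <= (FC_RX | FC_TX); fc++)
		printf("wanted_fc=%u -> advertising=%u\n", fc,
		       pause_advertising(fc));
	return 0;
}
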
+
+static void efx_start_port(struct efx_nic *efx)
+{
+	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
+	BUG_ON(efx->port_enabled);
+
+	mutex_lock(&efx->mac_lock);
+	efx->port_enabled = true;
+
+	/* Ensure MAC ingress/egress is enabled */
+	efx_mac_reconfigure(efx, false);
+
+	mutex_unlock(&efx->mac_lock);
+}
+
+/* Cancel work for MAC reconfiguration, periodic hardware monitoring
+ * and the async self-test, wait for them to finish and prevent them
+ * being scheduled again.  This doesn't cover online resets, which
+ * should only be cancelled when removing the device.
+ */
+static void efx_stop_port(struct efx_nic *efx)
+{
+	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
+
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	mutex_lock(&efx->mac_lock);
+	efx->port_enabled = false;
+	mutex_unlock(&efx->mac_lock);
+
+	/* Serialise against efx_set_multicast_list() */
+	netif_addr_lock_bh(efx->net_dev);
+	netif_addr_unlock_bh(efx->net_dev);
+
+	cancel_delayed_work_sync(&efx->monitor_work);
+	efx_selftest_async_cancel(efx);
+	cancel_work_sync(&efx->mac_work);
+}
+
+/* If the interface is supposed to be running but is not, start
+ * the hardware and software data path, regular activity for the port
+ * (MAC statistics, link polling, etc.) and schedule the port to be
+ * reconfigured.  Interrupts must already be enabled.  This function
+ * is safe to call multiple times, so long as the NIC is not disabled.
+ * Requires the RTNL lock.
+ */
+void efx_start_all(struct efx_nic *efx)
+{
+	EFX_ASSERT_RESET_SERIALISED(efx);
+	BUG_ON(efx->state == STATE_DISABLED);
+
+	/* Check that it is appropriate to restart the interface. All
+	 * of these flags are safe to read under just the rtnl lock
+	 */
+	if (efx->port_enabled || !netif_running(efx->net_dev) ||
+	    efx->reset_pending)
+		return;
+
+	efx_start_port(efx);
+	efx_start_datapath(efx);
+
+	/* Start the hardware monitor if there is one */
+	efx_start_monitor(efx);
+
+	/* Link state detection is normally event-driven; we have
+	 * to poll now because we could have missed a change
+	 */
+	mutex_lock(&efx->mac_lock);
+	if (efx_mcdi_phy_poll(efx))
+		efx_link_status_changed(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	if (efx->type->start_stats) {
+		efx->type->start_stats(efx);
+		efx->type->pull_stats(efx);
+		spin_lock_bh(&efx->stats_lock);
+		efx->type->update_stats(efx, NULL, NULL);
+		spin_unlock_bh(&efx->stats_lock);
+	}
+}
+
+/* Quiesce the hardware and software data path, and regular activity
+ * for the port without bringing the link down.  Safe to call multiple
+ * times with the NIC in almost any state, but interrupts should be
+ * enabled.  Requires the RTNL lock.
+ */
+void efx_stop_all(struct efx_nic *efx)
+{
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	/* port_enabled can be read safely under the rtnl lock */
+	if (!efx->port_enabled)
+		return;
+
+	if (efx->type->update_stats) {
+		/* update stats before we go down so we can accurately count
+		 * rx_nodesc_drops
+		 */
+		efx->type->pull_stats(efx);
+		spin_lock_bh(&efx->stats_lock);
+		efx->type->update_stats(efx, NULL, NULL);
+		spin_unlock_bh(&efx->stats_lock);
+		efx->type->stop_stats(efx);
+	}
+
+	efx_stop_port(efx);
+
+	/* Stop the kernel transmit interface.  This is only valid if
+	 * the device is stopped or detached; otherwise the watchdog
+	 * may fire immediately.
+	 */
+	WARN_ON(netif_running(efx->net_dev) &&
+		netif_device_present(efx->net_dev));
+	netif_tx_disable(efx->net_dev);
+
+	efx_stop_datapath(efx);
+}
+
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	spin_lock_bh(&efx->stats_lock);
+	efx_nic_update_stats_atomic(efx, NULL, stats);
+	spin_unlock_bh(&efx->stats_lock);
+}
+
+/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
+ * the MAC appropriately. All other PHY configuration changes are pushed
+ * through phy_op->set_settings(), and pushed asynchronously to the MAC
+ * through efx_monitor().
+ *
+ * Callers must hold the mac_lock
+ */
+int __efx_reconfigure_port(struct efx_nic *efx)
+{
+	enum efx_phy_mode phy_mode;
+	int rc = 0;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	/* Disable PHY transmit in mac level loopbacks */
+	phy_mode = efx->phy_mode;
+	if (LOOPBACK_INTERNAL(efx))
+		efx->phy_mode |= PHY_MODE_TX_DISABLED;
+	else
+		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
+
+	if (efx->type->reconfigure_port)
+		rc = efx->type->reconfigure_port(efx);
+
+	if (rc)
+		efx->phy_mode = phy_mode;
+
+	return rc;
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled.
+ */
+int efx_reconfigure_port(struct efx_nic *efx)
+{
+	int rc;
+
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	mutex_lock(&efx->mac_lock);
+	rc = __efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+static void efx_wait_for_bist_end(struct efx_nic *efx)
+{
+	int i;
+
+	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
+		if (efx_mcdi_poll_reboot(efx))
+			goto out;
+		msleep(BIST_WAIT_DELAY_MS);
+	}
+
+	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
+out:
+	/* Either way unset the BIST flag. If we found no reboot we probably
+	 * won't recover, but we should try.
+	 */
+	efx->mc_bist_for_other_fn = false;
+}
+
+/* Try recovery mechanisms.
+ * For now only EEH is supported.
+ * Returns 0 if the recovery mechanisms are unsuccessful.
+ * Returns a non-zero value otherwise.
+ */
+int efx_try_recovery(struct efx_nic *efx)
+{
+#ifdef CONFIG_EEH
+	/* A PCI error can occur and not be seen by EEH because nothing
+	 * happens on the PCI bus. In this case the driver may fail and
+	 * schedule a 'recover or reset', leading to this recovery handler.
+	 * Manually call the eeh failure check function.
+	 */
+	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
+
+	if (eeh_dev_check_failure(eehdev)) {
+		/* The EEH mechanisms will handle the error and reset the
+		 * device if necessary.
+		 */
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+/* Tears down the entire software state and most of the hardware state
+ * before reset.
+ */
+void efx_reset_down(struct efx_nic *efx, enum reset_type method)
+{
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->prepare_flr(efx);
+
+	efx_stop_all(efx);
+	efx_disable_interrupts(efx);
+
+	mutex_lock(&efx->mac_lock);
+	down_write(&efx->filter_sem);
+	mutex_lock(&efx->rss_lock);
+	efx->type->fini(efx);
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+void efx_watchdog(struct net_device *net_dev, unsigned int txqueue)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	netif_err(efx, tx_err, efx->net_dev,
+		  "TX stuck with port_enabled=%d: resetting channels\n",
+		  efx->port_enabled);
+
+	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
+}
+
+/* This function will always ensure that the locks acquired in
+ * efx_reset_down() are released. A failure return code indicates
+ * that we were unable to reinitialise the hardware, and the
+ * driver should be disabled. If ok is false, then the rx and tx
+ * engines are not restarted, pending a RESET_DISABLE.
+ */
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
+{
+	int rc;
+
+	EFX_ASSERT_RESET_SERIALISED(efx);
+
+	if (method == RESET_TYPE_MCDI_TIMEOUT)
+		efx->type->finish_flr(efx);
+
+	/* Ensure that SRAM is initialised even if we're disabling the device */
+	rc = efx->type->init(efx);
+	if (rc) {
+		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
+		goto fail;
+	}
+
+	if (!ok)
+		goto fail;
+
+	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
+	    method != RESET_TYPE_DATAPATH) {
+		rc = efx_mcdi_port_reconfigure(efx);
+		if (rc && rc != -EPERM)
+			netif_err(efx, drv, efx->net_dev,
+				  "could not restore PHY settings\n");
+	}
+
+	rc = efx_enable_interrupts(efx);
+	if (rc)
+		goto fail;
+
+#ifdef CONFIG_SFC_SRIOV
+	rc = efx->type->vswitching_restore(efx);
+	if (rc) /* not fatal; the PF will still work fine */
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to restore vswitching rc=%d;"
+			   " VFs may not function\n", rc);
+#endif
+
+	if (efx->type->rx_restore_rss_contexts)
+		efx->type->rx_restore_rss_contexts(efx);
+	mutex_unlock(&efx->rss_lock);
+	efx->type->filter_table_restore(efx);
+	up_write(&efx->filter_sem);
+	if (efx->type->sriov_reset)
+		efx->type->sriov_reset(efx);
+
+	mutex_unlock(&efx->mac_lock);
+
+	efx_start_all(efx);
+
+	if (efx->type->udp_tnl_push_ports)
+		efx->type->udp_tnl_push_ports(efx);
+
+	return 0;
+
+fail:
+	efx->port_initialized = false;
+
+	mutex_unlock(&efx->rss_lock);
+	up_write(&efx->filter_sem);
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+/* Reset the NIC using the specified method.  Note that the reset may
+ * fail, in which case the card will be left in an unusable state.
+ *
+ * Caller must hold the rtnl_lock.
+ */
+int efx_reset(struct efx_nic *efx, enum reset_type method)
+{
+	int rc, rc2 = 0;
+	bool disabled;
+
+	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+		   RESET_TYPE(method));
+
+	efx_device_detach_sync(efx);
+	/* efx_reset_down() grabs locks that prevent recovery on EF100.
+	 * EF100 reset is handled in the efx_nic_type callback below.
+	 */
+	if (efx_nic_rev(efx) != EFX_REV_EF100)
+		efx_reset_down(efx, method);
+
+	rc = efx->type->reset(efx, method);
+	if (rc) {
+		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
+		goto out;
+	}
+
+	/* Clear flags for the scopes we covered.  We assume the NIC and
+	 * driver are now quiescent so that there is no race here.
+	 */
+	if (method < RESET_TYPE_MAX_METHOD)
+		efx->reset_pending &= -(1 << (method + 1));
+	else /* it doesn't fit into the well-ordered scope hierarchy */
+		__clear_bit(method, &efx->reset_pending);
+
+	/* Reinitialise bus-mastering, which may have been turned off before
+	 * the reset was scheduled. This is still appropriate, even in the
+	 * RESET_TYPE_DISABLE case, since this driver generally assumes the
+	 * hardware can respond to requests.
+	 */
+	pci_set_master(efx->pci_dev);
+
+out:
+	/* Leave device stopped if necessary */
+	disabled = rc ||
+		method == RESET_TYPE_DISABLE ||
+		method == RESET_TYPE_RECOVER_OR_DISABLE;
+	if (efx_nic_rev(efx) != EFX_REV_EF100)
+		rc2 = efx_reset_up(efx, method, !disabled);
+	if (rc2) {
+		disabled = true;
+		if (!rc)
+			rc = rc2;
+	}
+
+	if (disabled) {
+		dev_close(efx->net_dev);
+		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
+		efx->state = STATE_DISABLED;
+	} else {
+		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
+		efx_device_attach_if_not_resetting(efx);
+	}
+	return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
+	unsigned long pending;
+	enum reset_type method;
+
+	pending = READ_ONCE(efx->reset_pending);
+	method = fls(pending) - 1;
+
+	if (method == RESET_TYPE_MC_BIST)
+		efx_wait_for_bist_end(efx);
+
+	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
+	     method == RESET_TYPE_RECOVER_OR_ALL) &&
+	    efx_try_recovery(efx))
+		return;
+
+	if (!pending)
+		return;
+
+	rtnl_lock();
+
+	/* We checked the state in efx_schedule_reset() but it may
+	 * have changed by now.  Now that we have the RTNL lock,
+	 * it cannot change again.
+	 */
+	if (efx->state == STATE_READY)
+		(void)efx_reset(efx, method);
+
+	rtnl_unlock();
+}
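
Two pieces of bit arithmetic in efx_reset_work() and efx_reset() above are easy to misread: fls(pending) - 1 selects the highest-numbered, i.e. widest-scope, pending reset, and reset_pending &= -(1 << (method + 1)) then clears every request of equal or lesser scope in one go. The snippet below is not driver code; it just demonstrates both operations on a sample bitmask.

#include <stdio.h>

/* Minimal stand-in for the kernel's fls(): 0 for 0, otherwise the 1-based
 * index of the highest set bit.
 */
static int example_fls(unsigned long x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	/* Suppose reset methods 0, 2 and 5 have all been scheduled */
	unsigned long pending = (1UL << 0) | (1UL << 2) | (1UL << 5);
	int method = example_fls(pending) - 1;

	printf("servicing method %d (widest scope wins)\n", method);	/* 5 */

	/* -(1 << (method + 1)) has every bit above 'method' set, so the AND
	 * drops all requests that this reset's scope already covers.
	 */
	pending &= -(1UL << (method + 1));
	printf("still pending: %#lx\n", pending);			/* 0 */
	return 0;
}
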
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+	enum reset_type method;
+
+	if (efx->state == STATE_RECOVERY) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "recovering: skip scheduling %s reset\n",
+			  RESET_TYPE(type));
+		return;
+	}
+
+	switch (type) {
+	case RESET_TYPE_INVISIBLE:
+	case RESET_TYPE_ALL:
+	case RESET_TYPE_RECOVER_OR_ALL:
+	case RESET_TYPE_WORLD:
+	case RESET_TYPE_DISABLE:
+	case RESET_TYPE_RECOVER_OR_DISABLE:
+	case RESET_TYPE_DATAPATH:
+	case RESET_TYPE_MC_BIST:
+	case RESET_TYPE_MCDI_TIMEOUT:
+		method = type;
+		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
+			  RESET_TYPE(method));
+		break;
+	default:
+		method = efx->type->map_reset_reason(type);
+		netif_dbg(efx, drv, efx->net_dev,
+			  "scheduling %s reset for %s\n",
+			  RESET_TYPE(method), RESET_TYPE(type));
+		break;
+	}
+
+	set_bit(method, &efx->reset_pending);
+	smp_mb(); /* ensure we change reset_pending before checking state */
+
+	/* If we're not READY then just leave the flags set as the cue
+	 * to abort probing or reschedule the reset later.
+	 */
+	if (READ_ONCE(efx->state) != STATE_READY)
+		return;
+
+	/* efx_process_channel() will no longer read events once a
+	 * reset is scheduled. So switch back to poll'd MCDI completions.
+	 */
+	efx_mcdi_mode_poll(efx);
+
+	efx_queue_reset_work(efx);
+}
+
+/**************************************************************************
+ *
+ * Dummy NIC operations
+ *
+ * Can be used for some unimplemented operations
+ * Needed so all function pointers are valid and do not have to be tested
+ * before use
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+	return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+int efx_init_struct(struct efx_nic *efx,
+		    struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+	int rc = -ENOMEM;
+
+	/* Initialise common structures */
+	INIT_LIST_HEAD(&efx->node);
+	INIT_LIST_HEAD(&efx->secondary_list);
+	spin_lock_init(&efx->biu_lock);
+#ifdef CONFIG_SFC_MTD
+	INIT_LIST_HEAD(&efx->mtd_list);
+#endif
+	INIT_WORK(&efx->reset_work, efx_reset_work);
+	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+	efx_selftest_async_init(efx);
+	efx->pci_dev = pci_dev;
+	efx->msg_enable = debug;
+	efx->state = STATE_UNINIT;
+	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+
+	efx->net_dev = net_dev;
+	efx->rx_prefix_size = efx->type->rx_prefix_size;
+	efx->rx_ip_align =
+		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
+	efx->rx_packet_hash_offset =
+		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
+	efx->rx_packet_ts_offset =
+		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
+	INIT_LIST_HEAD(&efx->rss_context.list);
+	efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+	mutex_init(&efx->rss_lock);
+	efx->vport_id = EVB_PORT_ID_ASSIGNED;
+	spin_lock_init(&efx->stats_lock);
+	efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
+	efx->num_mac_stats = MC_CMD_MAC_NSTATS;
+	BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
+	mutex_init(&efx->mac_lock);
+	init_rwsem(&efx->filter_sem);
+#ifdef CONFIG_RFS_ACCEL
+	mutex_init(&efx->rps_mutex);
+	spin_lock_init(&efx->rps_hash_lock);
+	/* Failure to allocate is not fatal, but may degrade ARFS performance */
+	efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
+				      sizeof(*efx->rps_hash_table), GFP_KERNEL);
+#endif
+	efx->mdio.dev = net_dev;
+	INIT_WORK(&efx->mac_work, efx_mac_work);
+	init_waitqueue_head(&efx->flush_wq);
+
+	efx->tx_queues_per_channel = 1;
+	efx->rxq_entries = EFX_DEFAULT_DMAQ_SIZE;
+	efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
+
+	efx->mem_bar = UINT_MAX;
+
+	rc = efx_init_channels(efx);
+	if (rc)
+		goto fail;
+
+	/* Would be good to use the net_dev name, but we're too early */
+	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
+		 pci_name(pci_dev));
+	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
+	if (!efx->workqueue) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	efx_fini_struct(efx);
+	return rc;
+}
+
+void efx_fini_struct(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+	kfree(efx->rps_hash_table);
+#endif
+
+	efx_fini_channels(efx);
+
+	kfree(efx->vpd_sn);
+
+	if (efx->workqueue) {
+		destroy_workqueue(efx->workqueue);
+		efx->workqueue = NULL;
+	}
+}
+
+/* This configures the PCI device to enable I/O and DMA. */
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+		unsigned int mem_map_size)
+{
+	struct pci_dev *pci_dev = efx->pci_dev;
+	int rc;
+
+	efx->mem_bar = UINT_MAX;
+
+	netif_dbg(efx, probe, efx->net_dev, "initialising I/O bar=%d\n", bar);
+
+	rc = pci_enable_device(pci_dev);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "failed to enable PCI device\n");
+		goto fail1;
+	}
+
+	pci_set_master(pci_dev);
+
+	rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "could not find a suitable DMA mask\n");
+		goto fail2;
+	}
+	netif_dbg(efx, probe, efx->net_dev,
+		  "using DMA mask %llx\n", (unsigned long long)dma_mask);
+
+	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
+	if (!efx->membase_phys) {
+		netif_err(efx, probe, efx->net_dev,
+			  "ERROR: No BAR%d mapping from the BIOS. "
+			  "Try pci=realloc on the kernel command line\n", bar);
+		rc = -ENODEV;
+		goto fail3;
+	}
+
+	rc = pci_request_region(pci_dev, bar, "sfc");
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "request for memory BAR[%d] failed\n", bar);
+		rc = -EIO;
+		goto fail3;
+	}
+	efx->mem_bar = bar;
+	efx->membase = ioremap(efx->membase_phys, mem_map_size);
+	if (!efx->membase) {
+		netif_err(efx, probe, efx->net_dev,
+			  "could not map memory BAR[%d] at %llx+%x\n", bar,
+			  (unsigned long long)efx->membase_phys, mem_map_size);
+		rc = -ENOMEM;
+		goto fail4;
+	}
+	netif_dbg(efx, probe, efx->net_dev,
+		  "memory BAR[%d] at %llx+%x (virtual %p)\n", bar,
+		  (unsigned long long)efx->membase_phys, mem_map_size,
+		  efx->membase);
+
+	return 0;
+
+fail4:
+	pci_release_region(efx->pci_dev, bar);
+fail3:
+	efx->membase_phys = 0;
+fail2:
+	pci_disable_device(efx->pci_dev);
+fail1:
+	return rc;
+}
+
+void efx_fini_io(struct efx_nic *efx)
+{
+	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
+
+	if (efx->membase) {
+		iounmap(efx->membase);
+		efx->membase = NULL;
+	}
+
+	if (efx->membase_phys) {
+		pci_release_region(efx->pci_dev, efx->mem_bar);
+		efx->membase_phys = 0;
+		efx->mem_bar = UINT_MAX;
+	}
+
+	/* Don't disable bus-mastering if VFs are assigned */
+	if (!pci_vfs_assigned(efx->pci_dev))
+		pci_disable_device(efx->pci_dev);
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t mcdi_logging_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+
+static ssize_t mcdi_logging_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev);
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	bool enable = count > 0 && *buf != '0';
+
+	mcdi->logging_enabled = enable;
+	return count;
+}
+
+static DEVICE_ATTR_RW(mcdi_logging);
+
+void efx_init_mcdi_logging(struct efx_nic *efx)
+{
+	int rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+
+	if (rc) {
+		netif_warn(efx, drv, efx->net_dev,
+			   "failed to init net dev attributes\n");
+	}
+}
+
+void efx_fini_mcdi_logging(struct efx_nic *efx)
+{
+	device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+}
+#endif
+
+/* A PCI error affecting this device was detected.
+ * At this point MMIO and DMA may be disabled.
+ * Stop the software path and request a slot reset.
+ */
+static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	rtnl_lock();
+
+	if (efx->state != STATE_DISABLED) {
+		efx->state = STATE_RECOVERY;
+		efx->reset_pending = 0;
+
+		efx_device_detach_sync(efx);
+
+		efx_stop_all(efx);
+		efx_disable_interrupts(efx);
+
+		status = PCI_ERS_RESULT_NEED_RESET;
+	} else {
+		/* If the interface is disabled we don't want to do anything
+		 * with it.
+		 */
+		status = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	rtnl_unlock();
+
+	pci_disable_device(pdev);
+
+	return status;
+}
+
+/* Fake a successful reset, which will be performed later in efx_io_resume. */
+static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
+{
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
+
+	if (pci_enable_device(pdev)) {
+		netif_err(efx, hw, efx->net_dev,
+			  "Cannot re-enable PCI device after reset.\n");
+		status = PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return status;
+}
+
+/* Perform the actual reset and resume I/O operations. */
+static void efx_io_resume(struct pci_dev *pdev)
+{
+	struct efx_nic *efx = pci_get_drvdata(pdev);
+	int rc;
+
+	rtnl_lock();
+
+	if (efx->state == STATE_DISABLED)
+		goto out;
+
+	rc = efx_reset(efx, RESET_TYPE_ALL);
+	if (rc) {
+		netif_err(efx, hw, efx->net_dev,
+			  "efx_reset failed after PCI error (%d)\n", rc);
+	} else {
+		efx->state = STATE_READY;
+		netif_dbg(efx, hw, efx->net_dev,
+			  "Done resetting and resuming IO after PCI error.\n");
+	}
+
+out:
+	rtnl_unlock();
+}
+
+/* For simplicity and reliability, we always require a slot reset and try to
+ * reset the hardware when a pci error affecting the device is detected.
+ * We leave both the link_reset and mmio_enabled callbacks unimplemented:
+ * with our request for slot reset the mmio_enabled callback will never be
+ * called, and the link_reset callback is not used by AER or EEH mechanisms.
+ */
+const struct pci_error_handlers efx_err_handlers = {
+	.error_detected = efx_io_error_detected,
+	.slot_reset	= efx_io_slot_reset,
+	.resume		= efx_io_resume,
+};
+
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct gre_base_hdr *greh;
+	__be16 dst_port;
+	u8 ipproto;
+
+	/* Does the NIC support encap offloads?
+	 * If not, we should never get here, because we shouldn't have
+	 * advertised encap offload feature flags in the first place.
+	 */
+	if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+		return false;
+
+	/* Determine encapsulation protocol in use */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		ipproto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		/* If there are extension headers, this will cause us to
+		 * think we can't offload something that we maybe could have.
+		 */
+		ipproto = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		/* Not IP, so can't offload it */
+		return false;
+	}
+	switch (ipproto) {
+	case IPPROTO_GRE:
+		/* We support NVGRE but not IP over GRE or random gretaps.
+		 * Specifically, the NIC will accept GRE as encapsulated if
+		 * the inner protocol is Ethernet, but only handle it
+		 * correctly if the GRE header is 8 bytes long.  Moreover,
+		 * it will not update the Checksum or Sequence Number fields
+		 * if they are present.  (The Routing Present flag,
+		 * GRE_ROUTING, cannot be set else the header would be more
+		 * than 8 bytes long; so we don't have to worry about it.)
+		 */
+		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+			return false;
+		if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+			return false;
+		if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+			return false;
+		greh = (struct gre_base_hdr *)skb_transport_header(skb);
+		return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+	case IPPROTO_UDP:
+		/* If the port is registered for a UDP tunnel, we assume the
+		 * packet is for that tunnel, and the NIC will handle it as
+		 * such.  If not, the NIC won't know what to do with it.
+		 */
+		dst_port = udp_hdr(skb)->dest;
+		return efx->type->udp_tnl_has_port(efx, dst_port);
+	default:
+		return false;
+	}
+}
+
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+				     netdev_features_t features)
+{
+	struct efx_nic *efx = netdev_priv(dev);
+
+	if (skb->encapsulation) {
+		if (features & NETIF_F_GSO_MASK)
+			/* Hardware can only do TSO with at most 208 bytes
+			 * of headers.
+			 */
+			if (skb_inner_transport_offset(skb) >
+			    EFX_TSO2_MAX_HDRLEN)
+				features &= ~(NETIF_F_GSO_MASK);
+		if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
+			if (!efx_can_encap_offloads(efx, skb))
+				features &= ~(NETIF_F_GSO_MASK |
+					      NETIF_F_CSUM_MASK);
+	}
+	return features;
+}
+
+int efx_get_phys_port_id(struct net_device *net_dev,
+			 struct netdev_phys_item_id *ppid)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->get_phys_port_id)
+		return efx->type->get_phys_port_id(efx, ppid);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_get_phys_port_name(struct net_device *net_dev, char *name, size_t len)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (snprintf(name, len, "p%u", efx->port_num) >= len)
+		return -EINVAL;
+	return 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.h b/drivers/net/ethernet/sfc/siena/efx_common.h
new file mode 100644
index 000000000000..65513fd0cf6c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/efx_common.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_COMMON_H
+#define EFX_COMMON_H
+
+int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
+		unsigned int mem_map_size);
+void efx_fini_io(struct efx_nic *efx);
+int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
+		    struct net_device *net_dev);
+void efx_fini_struct(struct efx_nic *efx);
+
+#define EFX_MAX_DMAQ_SIZE 4096UL
+#define EFX_DEFAULT_DMAQ_SIZE 1024UL
+#define EFX_MIN_DMAQ_SIZE 512UL
+
+#define EFX_MAX_EVQ_SIZE 16384UL
+#define EFX_MIN_EVQ_SIZE 512UL
+
+void efx_link_clear_advertising(struct efx_nic *efx);
+void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
+void efx_start_all(struct efx_nic *efx);
+void efx_stop_all(struct efx_nic *efx);
+
+void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats);
+
+int efx_create_reset_workqueue(void);
+void efx_queue_reset_work(struct efx_nic *efx);
+void efx_flush_reset_workqueue(struct efx_nic *efx);
+void efx_destroy_reset_workqueue(void);
+
+void efx_start_monitor(struct efx_nic *efx);
+
+int __efx_reconfigure_port(struct efx_nic *efx);
+int efx_reconfigure_port(struct efx_nic *efx);
+
+#define EFX_ASSERT_RESET_SERIALISED(efx)		\
+	do {						\
+		if ((efx->state == STATE_READY) ||	\
+		    (efx->state == STATE_RECOVERY) ||	\
+		    (efx->state == STATE_DISABLED))	\
+			ASSERT_RTNL();			\
+	} while (0)
+
+int efx_try_recovery(struct efx_nic *efx);
+void efx_reset_down(struct efx_nic *efx, enum reset_type method);
+void efx_watchdog(struct net_device *net_dev, unsigned int txqueue);
+int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
+int efx_reset(struct efx_nic *efx, enum reset_type method);
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+
+/* Dummy PHY ops for PHY drivers */
+int efx_port_dummy_op_int(struct efx_nic *efx);
+void efx_port_dummy_op_void(struct efx_nic *efx);
+
+static inline int efx_check_disabled(struct efx_nic *efx)
+{
+	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
+		netif_err(efx, drv, efx->net_dev,
+			  "device is disabled due to earlier errors\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+	netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+		   "channel %d scheduling NAPI poll on CPU%d\n",
+		   channel->channel, raw_smp_processor_id());
+
+	napi_schedule(&channel->napi_str);
+}
+
+static inline void efx_schedule_channel_irq(struct efx_channel *channel)
+{
+	channel->event_test_cpu = raw_smp_processor_id();
+	efx_schedule_channel(channel);
+}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+void efx_init_mcdi_logging(struct efx_nic *efx);
+void efx_fini_mcdi_logging(struct efx_nic *efx);
+#else
+static inline void efx_init_mcdi_logging(struct efx_nic *efx) {}
+static inline void efx_fini_mcdi_logging(struct efx_nic *efx) {}
+#endif
+
+void efx_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
+int efx_set_mac_address(struct net_device *net_dev, void *data);
+void efx_set_rx_mode(struct net_device *net_dev);
+int efx_set_features(struct net_device *net_dev, netdev_features_t data);
+void efx_link_status_changed(struct efx_nic *efx);
+unsigned int efx_xdp_max_mtu(struct efx_nic *efx);
+int efx_change_mtu(struct net_device *net_dev, int new_mtu);
+
+extern const struct pci_error_handlers efx_err_handlers;
+
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+				     netdev_features_t features);
+
+int efx_get_phys_port_id(struct net_device *net_dev,
+			 struct netdev_phys_item_id *ppid);
+
+int efx_get_phys_port_name(struct net_device *net_dev,
+			   char *name, size_t len);
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/enum.h b/drivers/net/ethernet/sfc/siena/enum.h
new file mode 100644
index 000000000000..cd590e0685e5
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/enum.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_ENUM_H
+#define EFX_ENUM_H
+
+/**
+ * enum efx_loopback_mode - loopback modes
+ * @LOOPBACK_NONE: no loopback
+ * @LOOPBACK_DATA: data path loopback
+ * @LOOPBACK_GMAC: loopback within GMAC
+ * @LOOPBACK_XGMII: loopback after XMAC
+ * @LOOPBACK_XGXS: loopback within BPX after XGXS
+ * @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
+ * @LOOPBACK_GMII: loopback within BPX after GMAC
+ * @LOOPBACK_SGMII: loopback within BPX within SGMII
+ * @LOOPBACK_XGBR: loopback within BPX within XGBR
+ * @LOOPBACK_XFI: loopback within BPX before XFI serdes
+ * @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
+ * @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
+ * @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
+ * @LOOPBACK_XFI_FAR: loopback after XFI serdes
+ * @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
+ * @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
+ * @LOOPBACK_PCS: loopback within 10G PHY at PCS level
+ * @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
+ * @LOOPBACK_XPORT: cross port loopback
+ * @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
+ * @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
+ * @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
+ * @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
+ * @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
+ * @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
+ * @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
+ * @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
+ */
+/* Please keep up-to-date w.r.t the following two #defines */
+enum efx_loopback_mode {
+	LOOPBACK_NONE = 0,
+	LOOPBACK_DATA = 1,
+	LOOPBACK_GMAC = 2,
+	LOOPBACK_XGMII = 3,
+	LOOPBACK_XGXS = 4,
+	LOOPBACK_XAUI = 5,
+	LOOPBACK_GMII = 6,
+	LOOPBACK_SGMII = 7,
+	LOOPBACK_XGBR = 8,
+	LOOPBACK_XFI = 9,
+	LOOPBACK_XAUI_FAR = 10,
+	LOOPBACK_GMII_FAR = 11,
+	LOOPBACK_SGMII_FAR = 12,
+	LOOPBACK_XFI_FAR = 13,
+	LOOPBACK_GPHY = 14,
+	LOOPBACK_PHYXS = 15,
+	LOOPBACK_PCS = 16,
+	LOOPBACK_PMAPMD = 17,
+	LOOPBACK_XPORT = 18,
+	LOOPBACK_XGMII_WS = 19,
+	LOOPBACK_XAUI_WS = 20,
+	LOOPBACK_XAUI_WS_FAR = 21,
+	LOOPBACK_XAUI_WS_NEAR = 22,
+	LOOPBACK_GMII_WS = 23,
+	LOOPBACK_XFI_WS = 24,
+	LOOPBACK_XFI_WS_FAR = 25,
+	LOOPBACK_PHYXS_WS = 26,
+	LOOPBACK_MAX
+};
+#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
+
+/* These loopbacks occur within the controller */
+#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) |		\
+			    (1 << LOOPBACK_GMAC) |		\
+			    (1 << LOOPBACK_XGMII)|		\
+			    (1 << LOOPBACK_XGXS) |		\
+			    (1 << LOOPBACK_XAUI) |		\
+			    (1 << LOOPBACK_GMII) |		\
+			    (1 << LOOPBACK_SGMII) |		\
+			    (1 << LOOPBACK_XGBR) |		\
+			    (1 << LOOPBACK_XFI) |		\
+			    (1 << LOOPBACK_XAUI_FAR) |		\
+			    (1 << LOOPBACK_GMII_FAR) |		\
+			    (1 << LOOPBACK_SGMII_FAR) |		\
+			    (1 << LOOPBACK_XFI_FAR) |		\
+			    (1 << LOOPBACK_XGMII_WS) |		\
+			    (1 << LOOPBACK_XAUI_WS) |		\
+			    (1 << LOOPBACK_XAUI_WS_FAR) |	\
+			    (1 << LOOPBACK_XAUI_WS_NEAR) |	\
+			    (1 << LOOPBACK_GMII_WS) |		\
+			    (1 << LOOPBACK_XFI_WS) |		\
+			    (1 << LOOPBACK_XFI_WS_FAR))
+
+#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) |		\
+		      (1 << LOOPBACK_XAUI_WS) |			\
+		      (1 << LOOPBACK_XAUI_WS_FAR) |		\
+		      (1 << LOOPBACK_XAUI_WS_NEAR) |		\
+		      (1 << LOOPBACK_GMII_WS) |			\
+		      (1 << LOOPBACK_XFI_WS) |			\
+		      (1 << LOOPBACK_XFI_WS_FAR) |		\
+		      (1 << LOOPBACK_PHYXS_WS))
+
+#define LOOPBACKS_EXTERNAL(_efx)					\
+	((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL &			\
+	 ~(1 << LOOPBACK_NONE))
+
+#define LOOPBACK_MASK(_efx)			\
+	(1 << (_efx)->loopback_mode)
+
+#define LOOPBACK_INTERNAL(_efx)				\
+	(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
+
+#define LOOPBACK_EXTERNAL(_efx)				\
+	(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
+
+#define LOOPBACK_CHANGED(_from, _to, _mask)				\
+	(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
+
+#define LOOPBACK_OUT_OF(_from, _to, _mask)				\
+	((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset.  The
+ * other values specify reasons, which efx_schedule_reset() will choose
+ * a method for.
+ *
+ * Reset methods are numbered in order of increasing scope.
+ *
+ * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
+ * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
+ * if unsuccessful.
+ * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
+ * @RESET_TYPE_WORLD: Reset as much as possible
+ * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
+ * unsuccessful.
+ * @RESET_TYPE_DATAPATH: Reset datapath only.
+ * @RESET_TYPE_MC_BIST: MC entering BIST mode.
+ * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
+ * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_DMA_ERROR: DMA error
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ * @RESET_TYPE_MC_FAILURE: MC reboot/assertion
+ * @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
+ */
+enum reset_type {
+	RESET_TYPE_INVISIBLE,
+	RESET_TYPE_RECOVER_OR_ALL,
+	RESET_TYPE_ALL,
+	RESET_TYPE_WORLD,
+	RESET_TYPE_RECOVER_OR_DISABLE,
+	RESET_TYPE_DATAPATH,
+	RESET_TYPE_MC_BIST,
+	RESET_TYPE_DISABLE,
+	RESET_TYPE_MAX_METHOD,
+	RESET_TYPE_TX_WATCHDOG,
+	RESET_TYPE_INT_ERROR,
+	RESET_TYPE_DMA_ERROR,
+	RESET_TYPE_TX_SKIP,
+	RESET_TYPE_MC_FAILURE,
+	/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
+	 * it doesn't fit the scope hierarchy (not well-ordered by inclusion).
+	 * We encode this by having its enum value be greater than
+	 * RESET_TYPE_MAX_METHOD.
+	 */
+	RESET_TYPE_MCDI_TIMEOUT,
+	RESET_TYPE_MAX,
+};
+
+#endif /* EFX_ENUM_H */
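
The LOOPBACK_CHANGED() and LOOPBACK_OUT_OF() helpers above differ subtly: the first fires on any transition that touches the masked set of modes, the second only when a transition actually leaves that set. The standalone restatement below (not driver code; it takes mode numbers instead of an efx pointer, with values copied from the enum) makes the difference concrete.

#include <stdio.h>

/* Mode numbers copied from enum efx_loopback_mode above */
enum { EX_NONE = 0, EX_XAUI_WS = 20, EX_GMII_WS = 23 };

#define EX_MASK(mode)			(1u << (mode))
#define EX_CHANGED(from, to, mask)	\
	(!!((EX_MASK(from) ^ EX_MASK(to)) & (mask)))
#define EX_OUT_OF(from, to, mask)	\
	((EX_MASK(from) & (mask)) && !(EX_MASK(to) & (mask)))

int main(void)
{
	/* Treat just these two wireside modes as the mask of interest */
	unsigned int ws_mask = EX_MASK(EX_XAUI_WS) | EX_MASK(EX_GMII_WS);

	/* Moving between two wireside modes: a change, but we never left
	 * the wireside set.
	 */
	printf("changed=%d out_of=%d\n",
	       EX_CHANGED(EX_XAUI_WS, EX_GMII_WS, ws_mask),	/* 1 */
	       EX_OUT_OF(EX_XAUI_WS, EX_GMII_WS, ws_mask));	/* 0 */

	/* Dropping loopback entirely: a change, and we did leave the set */
	printf("changed=%d out_of=%d\n",
	       EX_CHANGED(EX_XAUI_WS, EX_NONE, ws_mask),	/* 1 */
	       EX_OUT_OF(EX_XAUI_WS, EX_NONE, ws_mask));	/* 1 */
	return 0;
}
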
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
new file mode 100644
index 000000000000..48506373721a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/in.h>
+#include "net_driver.h"
+#include "workarounds.h"
+#include "selftest.h"
+#include "efx.h"
+#include "efx_channels.h"
+#include "rx_common.h"
+#include "tx_common.h"
+#include "ethtool_common.h"
+#include "filter.h"
+#include "nic.h"
+
+#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+			       enum ethtool_phys_id_state state)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	enum efx_led_mode mode = EFX_LED_DEFAULT;
+
+	switch (state) {
+	case ETHTOOL_ID_ON:
+		mode = EFX_LED_ON;
+		break;
+	case ETHTOOL_ID_OFF:
+		mode = EFX_LED_OFF;
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		mode = EFX_LED_DEFAULT;
+		break;
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+	}
+
+	return efx_mcdi_set_id_led(efx, mode);
+}
+
+static int efx_ethtool_get_regs_len(struct net_device *net_dev)
+{
+	return efx_nic_get_regs_len(netdev_priv(net_dev));
+}
+
+static void efx_ethtool_get_regs(struct net_device *net_dev,
+				 struct ethtool_regs *regs, void *buf)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	regs->version = efx->type->revision;
+	efx_nic_get_regs(efx, buf);
+}
+
+/*
+ * Each channel has a single IRQ and moderation timer, started by any
+ * completion (or other event).  Unless the module parameter
+ * separate_tx_channels is set, IRQs and moderation are therefore
+ * shared between RX and TX completions.  In this case, when RX IRQ
+ * moderation is explicitly changed then TX IRQ moderation is
+ * automatically changed too, but otherwise we fail if the two values
+ * are requested to be different.
+ *
+ * The hardware does not support a limit on the number of completions
+ * before an IRQ, so we do not use the max_frames fields.  We should
+ * report and require that max_frames == (usecs != 0), but this would
+ * invalidate existing user documentation.
+ *
+ * The hardware does not have distinct settings for interrupt
+ * moderation while the previous IRQ is being handled, so we should
+ * not use the 'irq' fields.  However, an earlier developer
+ * misunderstood the meaning of the 'irq' fields and the driver did
+ * not support the standard fields.  To avoid invalidating existing
+ * user documentation, we report and accept changes through either the
+ * standard or 'irq' fields.  If both are changed at the same time, we
+ * prefer the standard field.
+ *
+ * We implement adaptive IRQ moderation, but use a different algorithm
+ * from that assumed in the definition of struct ethtool_coalesce.
+ * Therefore we do not use any of the adaptive moderation parameters
+ * in it.
+ */
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+				    struct ethtool_coalesce *coalesce,
+				    struct kernel_ethtool_coalesce *kernel_coal,
+				    struct netlink_ext_ack *extack)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	unsigned int tx_usecs, rx_usecs;
+	bool rx_adaptive;
+
+	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
+
+	coalesce->tx_coalesce_usecs = tx_usecs;
+	coalesce->tx_coalesce_usecs_irq = tx_usecs;
+	coalesce->rx_coalesce_usecs = rx_usecs;
+	coalesce->rx_coalesce_usecs_irq = rx_usecs;
+	coalesce->use_adaptive_rx_coalesce = rx_adaptive;
+
+	return 0;
+}
+
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+				    struct ethtool_coalesce *coalesce,
+				    struct kernel_ethtool_coalesce *kernel_coal,
+				    struct netlink_ext_ack *extack)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_channel *channel;
+	unsigned int tx_usecs, rx_usecs;
+	bool adaptive, rx_may_override_tx;
+	int rc;
+
+	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
+
+	if (coalesce->rx_coalesce_usecs != rx_usecs)
+		rx_usecs = coalesce->rx_coalesce_usecs;
+	else
+		rx_usecs = coalesce->rx_coalesce_usecs_irq;
+
+	adaptive = coalesce->use_adaptive_rx_coalesce;
+
+	/* If channels are shared, TX IRQ moderation can be quietly
+	 * overridden unless it is changed from its old value.
+	 */
+	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
+			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
+	if (coalesce->tx_coalesce_usecs != tx_usecs)
+		tx_usecs = coalesce->tx_coalesce_usecs;
+	else
+		tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
+				     rx_may_override_tx);
+	if (rc != 0)
+		return rc;
+
+	efx_for_each_channel(channel, efx)
+		efx->type->push_irq_moderation(channel);
+
+	return 0;
+}
+
+static void
+efx_ethtool_get_ringparam(struct net_device *net_dev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam *kernel_ring,
+			  struct netlink_ext_ack *extack)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
+	ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
+	ring->rx_pending = efx->rxq_entries;
+	ring->tx_pending = efx->txq_entries;
+}
+
+static int
+efx_ethtool_set_ringparam(struct net_device *net_dev,
+			  struct ethtool_ringparam *ring,
+			  struct kernel_ethtool_ringparam *kernel_ring,
+			  struct netlink_ext_ack *extack)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 txq_entries;
+
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+	    ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
+	    ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
+		return -EINVAL;
+
+	if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
+		netif_err(efx, drv, efx->net_dev,
+			  "RX queues cannot be smaller than %u\n",
+			  EFX_RXQ_MIN_ENT);
+		return -EINVAL;
+	}
+
+	txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
+	if (txq_entries != ring->tx_pending)
+		netif_warn(efx, drv, efx->net_dev,
+			   "increasing TX queue size to minimum of %u\n",
+			   txq_entries);
+
+	return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
+}
+
+static void efx_ethtool_get_wol(struct net_device *net_dev,
+				struct ethtool_wolinfo *wol)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	efx->type->get_wol(efx, wol);
+}
+
+static int efx_ethtool_set_wol(struct net_device *net_dev,
+			       struct ethtool_wolinfo *wol)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	return efx->type->set_wol(efx, wol->wolopts);
+}
+
+static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
+				      struct ethtool_fec_stats *fec_stats)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->get_fec_stats)
+		efx->type->get_fec_stats(efx, fec_stats);
+}
+
+static int efx_ethtool_get_ts_info(struct net_device *net_dev,
+				   struct ethtool_ts_info *ts_info)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	/* Software capabilities */
+	ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
+				    SOF_TIMESTAMPING_SOFTWARE);
+	ts_info->phc_index = -1;
+
+	efx_ptp_get_ts_info(efx, ts_info);
+	return 0;
+}
+
+const struct ethtool_ops efx_ethtool_ops = {
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_USECS_IRQ |
+				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
+	.get_drvinfo		= efx_ethtool_get_drvinfo,
+	.get_regs_len		= efx_ethtool_get_regs_len,
+	.get_regs		= efx_ethtool_get_regs,
+	.get_msglevel		= efx_ethtool_get_msglevel,
+	.set_msglevel		= efx_ethtool_set_msglevel,
+	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= efx_ethtool_get_coalesce,
+	.set_coalesce		= efx_ethtool_set_coalesce,
+	.get_ringparam		= efx_ethtool_get_ringparam,
+	.set_ringparam		= efx_ethtool_set_ringparam,
+	.get_pauseparam         = efx_ethtool_get_pauseparam,
+	.set_pauseparam         = efx_ethtool_set_pauseparam,
+	.get_sset_count		= efx_ethtool_get_sset_count,
+	.self_test		= efx_ethtool_self_test,
+	.get_strings		= efx_ethtool_get_strings,
+	.set_phys_id		= efx_ethtool_phys_id,
+	.get_ethtool_stats	= efx_ethtool_get_stats,
+	.get_wol                = efx_ethtool_get_wol,
+	.set_wol                = efx_ethtool_set_wol,
+	.reset			= efx_ethtool_reset,
+	.get_rxnfc		= efx_ethtool_get_rxnfc,
+	.set_rxnfc		= efx_ethtool_set_rxnfc,
+	.get_rxfh_indir_size	= efx_ethtool_get_rxfh_indir_size,
+	.get_rxfh_key_size	= efx_ethtool_get_rxfh_key_size,
+	.get_rxfh		= efx_ethtool_get_rxfh,
+	.set_rxfh		= efx_ethtool_set_rxfh,
+	.get_rxfh_context	= efx_ethtool_get_rxfh_context,
+	.set_rxfh_context	= efx_ethtool_set_rxfh_context,
+	.get_ts_info		= efx_ethtool_get_ts_info,
+	.get_module_info	= efx_ethtool_get_module_info,
+	.get_module_eeprom	= efx_ethtool_get_module_eeprom,
+	.get_link_ksettings	= efx_ethtool_get_link_ksettings,
+	.set_link_ksettings	= efx_ethtool_set_link_ksettings,
+	.get_fec_stats		= efx_ethtool_get_fec_stats,
+	.get_fecparam		= efx_ethtool_get_fecparam,
+	.set_fecparam		= efx_ethtool_set_fecparam,
+};
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.c b/drivers/net/ethernet/sfc/siena/ethtool_common.c
new file mode 100644
index 000000000000..bd552c7dffcb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.c
@@ -0,0 +1,1338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include "net_driver.h"
+#include "mcdi.h"
+#include "nic.h"
+#include "selftest.h"
+#include "rx_common.h"
+#include "ethtool_common.h"
+#include "mcdi_port_common.h"
+
+struct efx_sw_stat_desc {
+	const char *name;
+	enum {
+		EFX_ETHTOOL_STAT_SOURCE_nic,
+		EFX_ETHTOOL_STAT_SOURCE_channel,
+		EFX_ETHTOOL_STAT_SOURCE_tx_queue
+	} source;
+	unsigned int offset;
+	u64 (*get_stat)(void *field); /* Reader function */
+};
+
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+				get_stat_function) {			\
+	.name = #stat_name,						\
+	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,		\
+	.offset = ((((field_type *) 0) ==				\
+		      &((struct efx_##source_name *)0)->field) ?	\
+		    offsetof(struct efx_##source_name, field) :		\
+		    offsetof(struct efx_##source_name, field)),		\
+	.get_stat = get_stat_function,					\
+}
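+
+/* The pointer comparison in the initialiser above never affects the result
+ * (both branches of the conditional are the same offsetof()); it exists
+ * solely so that the compiler warns if the named field is not of field_type.
+ */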
+
+static u64 efx_get_uint_stat(void *field)
+{
+	return *(unsigned int *)field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+	return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
+	EFX_ETHTOOL_STAT(field, nic, field,			\
+			 atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
+	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
+			 unsigned int, efx_get_uint_stat)
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(field)		\
+	EFX_ETHTOOL_STAT(field, channel, field,			\
+			 unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
+	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
+			 unsigned int, efx_get_uint_stat)
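+
+/* For example, EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc) describes a
+ * statistic named "rx_tobe_disc" that is read, as an unsigned int, from the
+ * n_rx_tobe_disc field of each struct efx_channel and summed over all
+ * channels when reported.
+ */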
+
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
+	EFX_ETHTOOL_UINT_TXQ_STAT(tso_fallbacks),
+	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
+	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
+	EFX_ETHTOOL_UINT_TXQ_STAT(cb_packets),
+	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_drops),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_bad_drops),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_tx),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_xdp_redirect),
+#ifdef CONFIG_RFS_ACCEL
+	EFX_ETHTOOL_UINT_CHANNEL_STAT_NO_N(rfs_filter_count),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_succeeded),
+	EFX_ETHTOOL_UINT_CHANNEL_STAT(rfs_failed),
+#endif
+};
+
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	efx_mcdi_print_fwver(efx, info->fw_version,
+			     sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	return efx->msg_enable;
+}
+
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	efx->msg_enable = msg_enable;
+}
+
+void efx_ethtool_self_test(struct net_device *net_dev,
+			   struct ethtool_test *test, u64 *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_self_tests *efx_tests;
+	bool already_up;
+	int rc = -ENOMEM;
+
+	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
+	if (!efx_tests)
+		goto fail;
+
+	if (efx->state != STATE_READY) {
+		rc = -EBUSY;
+		goto out;
+	}
+
+	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+	/* We need rx buffers and interrupts. */
+	already_up = (efx->net_dev->flags & IFF_UP);
+	if (!already_up) {
+		rc = dev_open(efx->net_dev, NULL);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed opening device.\n");
+			goto out;
+		}
+	}
+
+	rc = efx_selftest(efx, efx_tests, test->flags);
+
+	if (!already_up)
+		dev_close(efx->net_dev);
+
+	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+		   rc == 0 ? "passed" : "failed",
+		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
+out:
+	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
+	kfree(efx_tests);
+fail:
+	if (rc)
+		test->flags |= ETH_TEST_FL_FAILED;
+}
+
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+				struct ethtool_pauseparam *pause)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
+	pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
+	pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
+}
+
+int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	u8 wanted_fc, old_fc;
+	u32 old_adv;
+	int rc = 0;
+
+	mutex_lock(&efx->mac_lock);
+
+	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
+		     (pause->tx_pause ? EFX_FC_TX : 0) |
+		     (pause->autoneg ? EFX_FC_AUTO : 0));
+
+	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Flow control unsupported: tx ON rx OFF\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising[0]) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Autonegotiation is disabled\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Hook for Falcon bug 11482 workaround */
+	if (efx->type->prepare_enable_fc_tx &&
+	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+		efx->type->prepare_enable_fc_tx(efx);
+
+	old_adv = efx->link_advertising[0];
+	old_fc = efx->wanted_fc;
+	efx_link_set_wanted_fc(efx, wanted_fc);
+	if (efx->link_advertising[0] != old_adv ||
+	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
+		rc = efx_mcdi_port_reconfigure(efx);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "Unable to advertise requested flow "
+				  "control setting\n");
+			goto out;
+		}
+	}
+
+	/* Reconfigure the MAC. The PHY *may* generate a link state change
+	 * event if the user just changed the advertised capabilities, but
+	 * there's no harm doing this twice.
+	 */
+	efx_mac_reconfigure(efx, false);
+
+out:
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+/**
+ * efx_fill_test - fill in an individual self-test entry
+ * @test_index:		Index of the test
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ * @test:		Pointer to test result (used only if data != %NULL)
+ * @unit_format:	Unit name format (e.g. "chan\%d")
+ * @unit_id:		Unit id (e.g. 0 for "chan0")
+ * @test_format:	Test name format (e.g. "loopback.\%s.tx_sent")
+ * @test_id:		Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
+ *
+ * Fill in an individual self-test entry.
+ */
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
+			  int *test, const char *unit_format, int unit_id,
+			  const char *test_format, const char *test_id)
+{
+	char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
+
+	/* Fill data value, if applicable */
+	if (data)
+		data[test_index] = *test;
+
+	/* Fill string, if applicable */
+	if (strings) {
+		if (strchr(unit_format, '%'))
+			snprintf(unit_str, sizeof(unit_str),
+				 unit_format, unit_id);
+		else
+			strcpy(unit_str, unit_format);
+		snprintf(test_str, sizeof(test_str), test_format, test_id);
+		snprintf(strings + test_index * ETH_GSTRING_LEN,
+			 ETH_GSTRING_LEN,
+			 "%-6s %-24s", unit_str, test_str);
+	}
+}
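+
+/* For example, efx_fill_test(n, strings, data, &tests->eventq_dma[2],
+ * "chan%d", 2, "eventq.dma", NULL) copies the test result into data[n] and
+ * formats the string "chan2  eventq.dma" (padded per "%-6s %-24s").
+ */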
+
+#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
+#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
+#define EFX_LOOPBACK_NAME(_mode, _counter)			\
+	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
+
+/**
+ * efx_fill_loopback_test - fill in a block of loopback self-test entries
+ * @efx:		Efx NIC
+ * @lb_tests:		Efx loopback self-test results structure
+ * @mode:		Loopback test mode
+ * @test_index:		Starting index of the test
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries.  Return new test
+ * index.
+ */
+static int efx_fill_loopback_test(struct efx_nic *efx,
+				  struct efx_loopback_self_tests *lb_tests,
+				  enum efx_loopback_mode mode,
+				  unsigned int test_index,
+				  u8 *strings, u64 *data)
+{
+	struct efx_channel *channel =
+		efx_get_channel(efx, efx->tx_channel_offset);
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel) {
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_sent[tx_queue->label],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
+		efx_fill_test(test_index++, strings, data,
+			      &lb_tests->tx_done[tx_queue->label],
+			      EFX_TX_QUEUE_NAME(tx_queue),
+			      EFX_LOOPBACK_NAME(mode, "tx_done"));
+	}
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_good,
+		      "rx", 0,
+		      EFX_LOOPBACK_NAME(mode, "rx_good"));
+	efx_fill_test(test_index++, strings, data,
+		      &lb_tests->rx_bad,
+		      "rx", 0,
+		      EFX_LOOPBACK_NAME(mode, "rx_bad"));
+
+	return test_index;
+}
+
+/**
+ * efx_ethtool_fill_self_tests - get self-test details
+ * @efx:		Efx NIC
+ * @tests:		Efx self-test results structure, or %NULL
+ * @strings:		Ethtool strings, or %NULL
+ * @data:		Ethtool test results, or %NULL
+ *
+ * Get the number of self-test strings, the strings, and/or the results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
+ */
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+				struct efx_self_tests *tests,
+				u8 *strings, u64 *data)
+{
+	struct efx_channel *channel;
+	unsigned int n = 0, i;
+	enum efx_loopback_mode mode;
+
+	efx_fill_test(n++, strings, data, &tests->phy_alive,
+		      "phy", 0, "alive", NULL);
+	efx_fill_test(n++, strings, data, &tests->nvram,
+		      "core", 0, "nvram", NULL);
+	efx_fill_test(n++, strings, data, &tests->interrupt,
+		      "core", 0, "interrupt", NULL);
+
+	/* Event queues */
+	efx_for_each_channel(channel, efx) {
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_dma[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.dma", NULL);
+		efx_fill_test(n++, strings, data,
+			      &tests->eventq_int[channel->channel],
+			      EFX_CHANNEL_NAME(channel),
+			      "eventq.int", NULL);
+	}
+
+	efx_fill_test(n++, strings, data, &tests->memory,
+		      "core", 0, "memory", NULL);
+	efx_fill_test(n++, strings, data, &tests->registers,
+		      "core", 0, "registers", NULL);
+
+	for (i = 0; true; ++i) {
+		const char *name;
+
+		EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+		name = efx_mcdi_phy_test_name(efx, i);
+		if (name == NULL)
+			break;
+
+		efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL);
+	}
+
+	/* Loopback tests */
+	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+		if (!(efx->loopback_modes & (1 << mode)))
+			continue;
+		n = efx_fill_loopback_test(efx,
+					   &tests->loopback[mode], mode, n,
+					   strings, data);
+	}
+
+	return n;
+}
+
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+	size_t n_stats = 0;
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_tx_queues(channel)) {
+			n_stats++;
+			if (strings != NULL) {
+				snprintf(strings, ETH_GSTRING_LEN,
+					 "tx-%u.tx_packets",
+					 channel->tx_queue[0].queue /
+					 EFX_MAX_TXQ_PER_CHANNEL);
+
+				strings += ETH_GSTRING_LEN;
+			}
+		}
+	}
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_rx_queue(channel)) {
+			n_stats++;
+			if (strings != NULL) {
+				snprintf(strings, ETH_GSTRING_LEN,
+					 "rx-%d.rx_packets", channel->channel);
+				strings += ETH_GSTRING_LEN;
+			}
+		}
+	}
+	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+		unsigned short xdp;
+
+		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+			n_stats++;
+			if (strings) {
+				snprintf(strings, ETH_GSTRING_LEN,
+					 "tx-xdp-cpu-%hu.tx_packets", xdp);
+				strings += ETH_GSTRING_LEN;
+			}
+		}
+	}
+
+	return n_stats;
+}
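+
+/* For a typical configuration with two TX channels, two RX channels and one
+ * XDP TX queue, this emits strings such as "tx-0.tx_packets",
+ * "tx-1.tx_packets", "rx-0.rx_packets", "rx-1.rx_packets" and
+ * "tx-xdp-cpu-0.tx_packets", and returns the number of strings emitted.
+ */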
+
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return efx->type->describe_stats(efx, NULL) +
+		       EFX_ETHTOOL_SW_STAT_COUNT +
+		       efx_describe_per_queue_stats(efx, NULL) +
+		       efx_ptp_describe_stats(efx, NULL);
+	case ETH_SS_TEST:
+		return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
+	default:
+		return -EINVAL;
+	}
+}
+
+void efx_ethtool_get_strings(struct net_device *net_dev,
+			     u32 string_set, u8 *strings)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int i;
+
+	switch (string_set) {
+	case ETH_SS_STATS:
+		strings += (efx->type->describe_stats(efx, strings) *
+			    ETH_GSTRING_LEN);
+		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+			strlcpy(strings + i * ETH_GSTRING_LEN,
+				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
+		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+		strings += (efx_describe_per_queue_stats(efx, strings) *
+			    ETH_GSTRING_LEN);
+		efx_ptp_describe_stats(efx, strings);
+		break;
+	case ETH_SS_TEST:
+		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
+		break;
+	default:
+		/* No other string sets */
+		break;
+	}
+}
+
+void efx_ethtool_get_stats(struct net_device *net_dev,
+			   struct ethtool_stats *stats,
+			   u64 *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	const struct efx_sw_stat_desc *stat;
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+	struct efx_rx_queue *rx_queue;
+	int i;
+
+	spin_lock_bh(&efx->stats_lock);
+
+	/* Get NIC statistics */
+	data += efx->type->update_stats(efx, data, NULL);
+
+	/* Get software statistics */
+	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+		stat = &efx_sw_stat_desc[i];
+		switch (stat->source) {
+		case EFX_ETHTOOL_STAT_SOURCE_nic:
+			data[i] = stat->get_stat((void *)efx + stat->offset);
+			break;
+		case EFX_ETHTOOL_STAT_SOURCE_channel:
+			data[i] = 0;
+			efx_for_each_channel(channel, efx)
+				data[i] += stat->get_stat((void *)channel +
+							  stat->offset);
+			break;
+		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
+			data[i] = 0;
+			efx_for_each_channel(channel, efx) {
+				efx_for_each_channel_tx_queue(tx_queue, channel)
+					data[i] +=
+						stat->get_stat((void *)tx_queue
+							       + stat->offset);
+			}
+			break;
+		}
+	}
+	data += EFX_ETHTOOL_SW_STAT_COUNT;
+
+	spin_unlock_bh(&efx->stats_lock);
+
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_tx_queues(channel)) {
+			*data = 0;
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				*data += tx_queue->tx_packets;
+			}
+			data++;
+		}
+	}
+	efx_for_each_channel(channel, efx) {
+		if (efx_channel_has_rx_queue(channel)) {
+			*data = 0;
+			efx_for_each_channel_rx_queue(rx_queue, channel) {
+				*data += rx_queue->rx_packets;
+			}
+			data++;
+		}
+	}
+	if (efx->xdp_tx_queue_count && efx->xdp_tx_queues) {
+		int xdp;
+
+		for (xdp = 0; xdp < efx->xdp_tx_queue_count; xdp++) {
+			data[0] = efx->xdp_tx_queues[xdp]->tx_packets;
+			data++;
+		}
+	}
+
+	efx_ptp_update_stats(efx, data);
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+				   struct ethtool_link_ksettings *cmd)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_link_state *link_state = &efx->link_state;
+
+	mutex_lock(&efx->mac_lock);
+	efx_mcdi_phy_get_link_ksettings(efx, cmd);
+	mutex_unlock(&efx->mac_lock);
+
+	/* Both MACs support pause frames (bidirectional and respond-only) */
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
+
+	if (LOOPBACK_INTERNAL(efx)) {
+		cmd->base.speed = link_state->speed;
+		cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
+	}
+
+	return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+				   const struct ethtool_link_ksettings *cmd)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	/* GMAC does not support 1000Mbps HD */
+	if ((cmd->base.speed == SPEED_1000) &&
+	    (cmd->base.duplex != DUPLEX_FULL)) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "rejecting unsupported 1000Mbps HD setting\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&efx->mac_lock);
+	rc = efx_mcdi_phy_set_link_ksettings(efx, cmd);
+	mutex_unlock(&efx->mac_lock);
+	return rc;
+}
+
+int efx_ethtool_get_fecparam(struct net_device *net_dev,
+			     struct ethtool_fecparam *fecparam)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	mutex_lock(&efx->mac_lock);
+	rc = efx_mcdi_phy_get_fecparam(efx, fecparam);
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+int efx_ethtool_set_fecparam(struct net_device *net_dev,
+			     struct ethtool_fecparam *fecparam)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	mutex_lock(&efx->mac_lock);
+	rc = efx_mcdi_phy_set_fecparam(efx, fecparam);
+	mutex_unlock(&efx->mac_lock);
+
+	return rc;
+}
+
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
+
+#define IP4_ADDR_FULL_MASK	((__force __be32)~0)
+#define IP_PROTO_FULL_MASK	0xFF
+#define PORT_FULL_MASK		((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK	((__force __be16)~0)
+
+static inline void ip6_fill_mask(__be32 *mask)
+{
+	mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
+}
+
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+				      struct ethtool_rx_flow_spec *rule,
+				      u32 *rss_context)
+{
+	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+	struct efx_filter_spec spec;
+	int rc;
+
+	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+					rule->location, &spec);
+	if (rc)
+		return rc;
+
+	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+		rule->ring_cookie = RX_CLS_FLOW_DISC;
+	else
+		rule->ring_cookie = spec.dmaq_id;
+
+	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+	    spec.ether_type == htons(ETH_P_IP) &&
+	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+	    !(spec.match_flags &
+	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+		EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+				   TCP_V4_FLOW : UDP_V4_FLOW);
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+			ip_entry->ip4dst = spec.loc_host[0];
+			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+			ip_entry->ip4src = spec.rem_host[0];
+			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+			ip_entry->pdst = spec.loc_port;
+			ip_mask->pdst = PORT_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+			ip_entry->psrc = spec.rem_port;
+			ip_mask->psrc = PORT_FULL_MASK;
+		}
+	} else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+	    spec.ether_type == htons(ETH_P_IPV6) &&
+	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+	    !(spec.match_flags &
+	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+		EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+				   TCP_V6_FLOW : UDP_V6_FLOW);
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+			memcpy(ip6_entry->ip6dst, spec.loc_host,
+			       sizeof(ip6_entry->ip6dst));
+			ip6_fill_mask(ip6_mask->ip6dst);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+			memcpy(ip6_entry->ip6src, spec.rem_host,
+			       sizeof(ip6_entry->ip6src));
+			ip6_fill_mask(ip6_mask->ip6src);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+			ip6_entry->pdst = spec.loc_port;
+			ip6_mask->pdst = PORT_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+			ip6_entry->psrc = spec.rem_port;
+			ip6_mask->psrc = PORT_FULL_MASK;
+		}
+	} else if (!(spec.match_flags &
+		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+		       EFX_FILTER_MATCH_OUTER_VID))) {
+		rule->flow_type = ETHER_FLOW;
+		if (spec.match_flags &
+		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
+			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+				eth_broadcast_addr(mac_mask->h_dest);
+			else
+				ether_addr_copy(mac_mask->h_dest,
+						mac_addr_ig_mask);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
+			eth_broadcast_addr(mac_mask->h_source);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+			mac_entry->h_proto = spec.ether_type;
+			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+		}
+	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+		   spec.ether_type == htons(ETH_P_IP) &&
+		   !(spec.match_flags &
+		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+		       EFX_FILTER_MATCH_IP_PROTO))) {
+		rule->flow_type = IPV4_USER_FLOW;
+		uip_entry->ip_ver = ETH_RX_NFC_IP4;
+		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+			uip_mask->proto = IP_PROTO_FULL_MASK;
+			uip_entry->proto = spec.ip_proto;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+			uip_entry->ip4dst = spec.loc_host[0];
+			uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+			uip_entry->ip4src = spec.rem_host[0];
+			uip_mask->ip4src = IP4_ADDR_FULL_MASK;
+		}
+	} else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE &&
+		   spec.ether_type == htons(ETH_P_IPV6) &&
+		   !(spec.match_flags &
+		     ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+		       EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+		       EFX_FILTER_MATCH_IP_PROTO))) {
+		rule->flow_type = IPV6_USER_FLOW;
+		if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) {
+			uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
+			uip6_entry->l4_proto = spec.ip_proto;
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+			memcpy(uip6_entry->ip6dst, spec.loc_host,
+			       sizeof(uip6_entry->ip6dst));
+			ip6_fill_mask(uip6_mask->ip6dst);
+		}
+		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+			memcpy(uip6_entry->ip6src, spec.rem_host,
+			       sizeof(uip6_entry->ip6src));
+			ip6_fill_mask(uip6_mask->ip6src);
+		}
+	} else {
+		/* The above should handle all filters that we insert */
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+		rule->flow_type |= FLOW_EXT;
+		rule->h_ext.vlan_tci = spec.outer_vid;
+		rule->m_ext.vlan_tci = htons(0xfff);
+	}
+
+	if (spec.flags & EFX_FILTER_FLAG_RX_RSS) {
+		rule->flow_type |= FLOW_RSS;
+		*rss_context = spec.rss_context;
+	}
+
+	return rc;
+}
+
+int efx_ethtool_get_rxnfc(struct net_device *net_dev,
+			  struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	u32 rss_context = 0;
+	s32 rc = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = efx->n_rx_channels;
+		return 0;
+
+	case ETHTOOL_GRXFH: {
+		struct efx_rss_context *ctx = &efx->rss_context;
+		__u64 data;
+
+		mutex_lock(&efx->rss_lock);
+		if (info->flow_type & FLOW_RSS && info->rss_context) {
+			ctx = efx_find_rss_context_entry(efx, info->rss_context);
+			if (!ctx) {
+				rc = -ENOENT;
+				goto out_unlock;
+			}
+		}
+
+		data = 0;
+		if (!efx_rss_active(ctx)) /* No RSS */
+			goto out_setdata_unlock;
+
+		switch (info->flow_type & ~FLOW_RSS) {
+		case UDP_V4_FLOW:
+		case UDP_V6_FLOW:
+			if (ctx->rx_hash_udp_4tuple)
+				data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+					RXH_IP_SRC | RXH_IP_DST);
+			else
+				data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		case TCP_V4_FLOW:
+		case TCP_V6_FLOW:
+			data = (RXH_L4_B_0_1 | RXH_L4_B_2_3 |
+				RXH_IP_SRC | RXH_IP_DST);
+			break;
+		case SCTP_V4_FLOW:
+		case SCTP_V6_FLOW:
+		case AH_ESP_V4_FLOW:
+		case AH_ESP_V6_FLOW:
+		case IPV4_FLOW:
+		case IPV6_FLOW:
+			data = RXH_IP_SRC | RXH_IP_DST;
+			break;
+		default:
+			break;
+		}
+out_setdata_unlock:
+		info->data = data;
+out_unlock:
+		mutex_unlock(&efx->rss_lock);
+		return rc;
+	}
+
+	case ETHTOOL_GRXCLSRLCNT:
+		info->data = efx_filter_get_rx_id_limit(efx);
+		if (info->data == 0)
+			return -EOPNOTSUPP;
+		info->data |= RX_CLS_LOC_SPECIAL;
+		info->rule_cnt =
+			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+		return 0;
+
+	case ETHTOOL_GRXCLSRULE:
+		if (efx_filter_get_rx_id_limit(efx) == 0)
+			return -EOPNOTSUPP;
+		rc = efx_ethtool_get_class_rule(efx, &info->fs, &rss_context);
+		if (rc < 0)
+			return rc;
+		if (info->fs.flow_type & FLOW_RSS)
+			info->rss_context = rss_context;
+		return 0;
+
+	case ETHTOOL_GRXCLSRLALL:
+		info->data = efx_filter_get_rx_id_limit(efx);
+		if (info->data == 0)
+			return -EOPNOTSUPP;
+		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+					   rule_locs, info->rule_cnt);
+		if (rc < 0)
+			return rc;
+		info->rule_cnt = rc;
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static inline bool ip6_mask_is_full(__be32 mask[4])
+{
+	return !~(mask[0] & mask[1] & mask[2] & mask[3]);
+}
+
+static inline bool ip6_mask_is_empty(__be32 mask[4])
+{
+	return !(mask[0] | mask[1] | mask[2] | mask[3]);
+}
+
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+				      struct ethtool_rx_flow_spec *rule,
+				      u32 rss_context)
+{
+	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+	struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
+	struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
+	struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
+	struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
+	struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
+	struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
+	u32 flow_type = rule->flow_type & ~(FLOW_EXT | FLOW_RSS);
+	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+	enum efx_filter_flags flags = 0;
+	struct efx_filter_spec spec;
+	int rc;
+
+	/* Check that the user wants us to choose the location */
+	if (rule->location != RX_CLS_LOC_ANY)
+		return -EINVAL;
+
+	/* Range-check ring_cookie */
+	if (rule->ring_cookie >= efx->n_rx_channels &&
+	    rule->ring_cookie != RX_CLS_FLOW_DISC)
+		return -EINVAL;
+
+	/* Check for unsupported extensions */
+	if ((rule->flow_type & FLOW_EXT) &&
+	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
+	     rule->m_ext.data[1]))
+		return -EINVAL;
+
+	if (efx->rx_scatter)
+		flags |= EFX_FILTER_FLAG_RX_SCATTER;
+	if (rule->flow_type & FLOW_RSS)
+		flags |= EFX_FILTER_FLAG_RX_RSS;
+
+	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, flags,
+			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
+
+	if (rule->flow_type & FLOW_RSS)
+		spec.rss_context = rss_context;
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+				    EFX_FILTER_MATCH_IP_PROTO);
+		spec.ether_type = htons(ETH_P_IP);
+		spec.ip_proto = flow_type == TCP_V4_FLOW ? IPPROTO_TCP
+							 : IPPROTO_UDP;
+		if (ip_mask->ip4dst) {
+			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+			spec.loc_host[0] = ip_entry->ip4dst;
+		}
+		if (ip_mask->ip4src) {
+			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+			spec.rem_host[0] = ip_entry->ip4src;
+		}
+		if (ip_mask->pdst) {
+			if (ip_mask->pdst != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+			spec.loc_port = ip_entry->pdst;
+		}
+		if (ip_mask->psrc) {
+			if (ip_mask->psrc != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+			spec.rem_port = ip_entry->psrc;
+		}
+		if (ip_mask->tos)
+			return -EINVAL;
+		break;
+
+	case TCP_V6_FLOW:
+	case UDP_V6_FLOW:
+		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+				    EFX_FILTER_MATCH_IP_PROTO);
+		spec.ether_type = htons(ETH_P_IPV6);
+		spec.ip_proto = flow_type == TCP_V6_FLOW ? IPPROTO_TCP
+							 : IPPROTO_UDP;
+		if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
+			if (!ip6_mask_is_full(ip6_mask->ip6dst))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+			memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
+		}
+		if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
+			if (!ip6_mask_is_full(ip6_mask->ip6src))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+			memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
+		}
+		if (ip6_mask->pdst) {
+			if (ip6_mask->pdst != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+			spec.loc_port = ip6_entry->pdst;
+		}
+		if (ip6_mask->psrc) {
+			if (ip6_mask->psrc != PORT_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+			spec.rem_port = ip6_entry->psrc;
+		}
+		if (ip6_mask->tclass)
+			return -EINVAL;
+		break;
+
+	case IPV4_USER_FLOW:
+		if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
+		    uip_entry->ip_ver != ETH_RX_NFC_IP4)
+			return -EINVAL;
+		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+		spec.ether_type = htons(ETH_P_IP);
+		if (uip_mask->ip4dst) {
+			if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+			spec.loc_host[0] = uip_entry->ip4dst;
+		}
+		if (uip_mask->ip4src) {
+			if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+			spec.rem_host[0] = uip_entry->ip4src;
+		}
+		if (uip_mask->proto) {
+			if (uip_mask->proto != IP_PROTO_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+			spec.ip_proto = uip_entry->proto;
+		}
+		break;
+
+	case IPV6_USER_FLOW:
+		if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
+			return -EINVAL;
+		spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE;
+		spec.ether_type = htons(ETH_P_IPV6);
+		if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
+			if (!ip6_mask_is_full(uip6_mask->ip6dst))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+			memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
+		}
+		if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
+			if (!ip6_mask_is_full(uip6_mask->ip6src))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+			memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
+		}
+		if (uip6_mask->l4_proto) {
+			if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+			spec.ip_proto = uip6_entry->l4_proto;
+		}
+		break;
+
+	case ETHER_FLOW:
+		if (!is_zero_ether_addr(mac_mask->h_dest)) {
+			if (ether_addr_equal(mac_mask->h_dest,
+					     mac_addr_ig_mask))
+				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+			else if (is_broadcast_ether_addr(mac_mask->h_dest))
+				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+			else
+				return -EINVAL;
+			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
+		}
+		if (!is_zero_ether_addr(mac_mask->h_source)) {
+			if (!is_broadcast_ether_addr(mac_mask->h_source))
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
+		}
+		if (mac_mask->h_proto) {
+			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+				return -EINVAL;
+			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+			spec.ether_type = mac_entry->h_proto;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+		if (rule->m_ext.vlan_tci != htons(0xfff))
+			return -EINVAL;
+		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+		spec.outer_vid = rule->h_ext.vlan_tci;
+	}
+
+	rc = efx_filter_insert_filter(efx, &spec, true);
+	if (rc < 0)
+		return rc;
+
+	rule->location = rc;
+	return 0;
+}
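+
+/* For example, an ETHTOOL_SRXCLSRLINS request with location == RX_CLS_LOC_ANY,
+ * flow_type == TCP_V4_FLOW, h_u.tcp_ip4_spec.pdst == htons(80),
+ * m_u.tcp_ip4_spec.pdst == PORT_FULL_MASK, all other mask fields zero and
+ * ring_cookie == 3 becomes a manual-priority filter matching TCP/IPv4
+ * destination port 80, delivered to RX queue 3 (assuming the NIC has at
+ * least four RX channels).
+ */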
+
+int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+			  struct ethtool_rxnfc *info)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx_filter_get_rx_id_limit(efx) == 0)
+		return -EOPNOTSUPP;
+
+	switch (info->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		return efx_ethtool_set_class_rule(efx, &info->fs,
+						  info->rss_context);
+
+	case ETHTOOL_SRXCLSRLDEL:
+		return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+						 info->fs.location);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->n_rx_channels == 1)
+		return 0;
+	return ARRAY_SIZE(efx->rss_context.rx_indir_table);
+}
+
+u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	return efx->type->rx_hash_key_size;
+}
+
+int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+			 u8 *hfunc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	rc = efx->type->rx_pull_rss_config(efx);
+	if (rc)
+		return rc;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (indir)
+		memcpy(indir, efx->rss_context.rx_indir_table,
+		       sizeof(efx->rss_context.rx_indir_table));
+	if (key)
+		memcpy(key, efx->rss_context.rx_hash_key,
+		       efx->type->rx_hash_key_size);
+	return 0;
+}
+
+int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
+			 const u8 *key, const u8 hfunc)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	/* Hash function is Toeplitz, cannot be changed */
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+	if (!indir && !key)
+		return 0;
+
+	if (!key)
+		key = efx->rss_context.rx_hash_key;
+	if (!indir)
+		indir = efx->rss_context.rx_indir_table;
+
+	return efx->type->rx_push_rss_config(efx, true, indir, key);
+}
+
+int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+				 u8 *key, u8 *hfunc, u32 rss_context)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_rss_context *ctx;
+	int rc = 0;
+
+	if (!efx->type->rx_pull_rss_context_config)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->rss_lock);
+	ctx = efx_find_rss_context_entry(efx, rss_context);
+	if (!ctx) {
+		rc = -ENOENT;
+		goto out_unlock;
+	}
+	rc = efx->type->rx_pull_rss_context_config(efx, ctx);
+	if (rc)
+		goto out_unlock;
+
+	if (hfunc)
+		*hfunc = ETH_RSS_HASH_TOP;
+	if (indir)
+		memcpy(indir, ctx->rx_indir_table, sizeof(ctx->rx_indir_table));
+	if (key)
+		memcpy(key, ctx->rx_hash_key, efx->type->rx_hash_key_size);
+out_unlock:
+	mutex_unlock(&efx->rss_lock);
+	return rc;
+}
+
+int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+				 const u32 *indir, const u8 *key,
+				 const u8 hfunc, u32 *rss_context,
+				 bool delete)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_rss_context *ctx;
+	bool allocated = false;
+	int rc;
+
+	if (!efx->type->rx_push_rss_context_config)
+		return -EOPNOTSUPP;
+	/* Hash function is Toeplitz, cannot be changed */
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&efx->rss_lock);
+
+	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
+		if (delete) {
+			/* alloc + delete == Nothing to do */
+			rc = -EINVAL;
+			goto out_unlock;
+		}
+		ctx = efx_alloc_rss_context_entry(efx);
+		if (!ctx) {
+			rc = -ENOMEM;
+			goto out_unlock;
+		}
+		ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+		/* Initialise indir table and key to defaults */
+		efx_set_default_rx_indir_table(efx, ctx);
+		netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
+		allocated = true;
+	} else {
+		ctx = efx_find_rss_context_entry(efx, *rss_context);
+		if (!ctx) {
+			rc = -ENOENT;
+			goto out_unlock;
+		}
+	}
+
+	if (delete) {
+		/* delete this context */
+		rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
+		if (!rc)
+			efx_free_rss_context_entry(ctx);
+		goto out_unlock;
+	}
+
+	if (!key)
+		key = ctx->rx_hash_key;
+	if (!indir)
+		indir = ctx->rx_indir_table;
+
+	rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
+	if (rc && allocated)
+		efx_free_rss_context_entry(ctx);
+	else
+		*rss_context = ctx->user_id;
+out_unlock:
+	mutex_unlock(&efx->rss_lock);
+	return rc;
+}
+
+int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int rc;
+
+	rc = efx->type->map_reset_flags(flags);
+	if (rc < 0)
+		return rc;
+
+	return efx_reset(efx, rc);
+}
+
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+				  struct ethtool_eeprom *ee,
+				  u8 *data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int ret;
+
+	mutex_lock(&efx->mac_lock);
+	ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data);
+	mutex_unlock(&efx->mac_lock);
+
+	return ret;
+}
+
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+				struct ethtool_modinfo *modinfo)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	int ret;
+
+	mutex_lock(&efx->mac_lock);
+	ret = efx_mcdi_phy_get_module_info(efx, modinfo);
+	mutex_unlock(&efx->mac_lock);
+
+	return ret;
+}
diff --git a/drivers/net/ethernet/sfc/siena/ethtool_common.h b/drivers/net/ethernet/sfc/siena/ethtool_common.h
new file mode 100644
index 000000000000..659491932101
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ethtool_common.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2019 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_COMMON_H
+#define EFX_ETHTOOL_COMMON_H
+
+void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+			     struct ethtool_drvinfo *info);
+u32 efx_ethtool_get_msglevel(struct net_device *net_dev);
+void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
+void efx_ethtool_self_test(struct net_device *net_dev,
+			   struct ethtool_test *test, u64 *data);
+void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+				struct ethtool_pauseparam *pause);
+int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+			       struct ethtool_pauseparam *pause);
+int efx_ethtool_fill_self_tests(struct efx_nic *efx,
+				struct efx_self_tests *tests,
+				u8 *strings, u64 *data);
+int efx_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
+void efx_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
+			     u8 *strings);
+void efx_ethtool_get_stats(struct net_device *net_dev,
+			   struct ethtool_stats *stats __attribute__ ((unused)),
+			   u64 *data);
+int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
+				   struct ethtool_link_ksettings *out);
+int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
+				   const struct ethtool_link_ksettings *settings);
+int efx_ethtool_get_fecparam(struct net_device *net_dev,
+			     struct ethtool_fecparam *fecparam);
+int efx_ethtool_set_fecparam(struct net_device *net_dev,
+			     struct ethtool_fecparam *fecparam);
+int efx_ethtool_get_rxnfc(struct net_device *net_dev,
+			  struct ethtool_rxnfc *info, u32 *rule_locs);
+int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+			  struct ethtool_rxnfc *info);
+u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
+u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev);
+int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
+			 u8 *hfunc);
+int efx_ethtool_set_rxfh(struct net_device *net_dev,
+			 const u32 *indir, const u8 *key, const u8 hfunc);
+int efx_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
+				 u8 *key, u8 *hfunc, u32 rss_context);
+int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
+				 const u32 *indir, const u8 *key,
+				 const u8 hfunc, u32 *rss_context,
+				 bool delete);
+int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
+int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
+				  struct ethtool_eeprom *ee,
+				  u8 *data);
+int efx_ethtool_get_module_info(struct net_device *net_dev,
+				struct ethtool_modinfo *modinfo);
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/farch_regs.h b/drivers/net/ethernet/sfc/siena/farch_regs.h
new file mode 100644
index 000000000000..d138be423e63
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/farch_regs.h
@@ -0,0 +1,2929 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
+
+/*
+ * Falcon hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ *     F<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *             MMIO register  MC register  Host memory structure
+ * -------------------------------------------------------------
+ * Address     R              MCR
+ * Bitfield    RF             MCRF         SF
+ * Enumerator  FE             MCFE         SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ *     A: Falcon A1 (SFC4000AB)
+ *     B: Falcon B0 (SFC4000BA)
+ *     C: Siena A0 (SFL9021AA)
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
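+
+/* For example, FRF_AZ_ADR_REGION0_LBN below names a bitfield (RF) of an MMIO
+ * register, valid from Falcon A1 (A) onwards and never removed (Z).
+ */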
+
+/**************************************************************************
+ *
+ * Falcon/Siena registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* ADR_REGION_REG: Address region register */
+#define	FR_AZ_ADR_REGION 0x00000000
+#define	FRF_AZ_ADR_REGION3_LBN 96
+#define	FRF_AZ_ADR_REGION3_WIDTH 18
+#define	FRF_AZ_ADR_REGION2_LBN 64
+#define	FRF_AZ_ADR_REGION2_WIDTH 18
+#define	FRF_AZ_ADR_REGION1_LBN 32
+#define	FRF_AZ_ADR_REGION1_WIDTH 18
+#define	FRF_AZ_ADR_REGION0_LBN 0
+#define	FRF_AZ_ADR_REGION0_WIDTH 18
+
+/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
+#define	FR_AZ_INT_EN_KER 0x00000010
+#define	FRF_AZ_KER_INT_LEVE_SEL_LBN 8
+#define	FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
+#define	FRF_AZ_KER_INT_CHAR_LBN 4
+#define	FRF_AZ_KER_INT_CHAR_WIDTH 1
+#define	FRF_AZ_KER_INT_KER_LBN 3
+#define	FRF_AZ_KER_INT_KER_WIDTH 1
+#define	FRF_AZ_DRV_INT_EN_KER_LBN 0
+#define	FRF_AZ_DRV_INT_EN_KER_WIDTH 1
+
+/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
+#define	FR_BZ_INT_EN_CHAR 0x00000020
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
+#define	FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
+#define	FRF_BZ_CHAR_INT_CHAR_LBN 4
+#define	FRF_BZ_CHAR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_CHAR_INT_KER_LBN 3
+#define	FRF_BZ_CHAR_INT_KER_WIDTH 1
+#define	FRF_BZ_DRV_INT_EN_CHAR_LBN 0
+#define	FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
+
+/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
+#define	FR_AZ_INT_ADR_KER 0x00000030
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
+#define	FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
+#define	FRF_AZ_INT_ADR_KER_LBN 0
+#define	FRF_AZ_INT_ADR_KER_WIDTH 64
+
+/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
+#define	FR_BZ_INT_ADR_CHAR 0x00000040
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
+#define	FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
+#define	FRF_BZ_INT_ADR_CHAR_LBN 0
+#define	FRF_BZ_INT_ADR_CHAR_WIDTH 64
+
+/* INT_ACK_KER: Kernel interrupt acknowledge register */
+#define	FR_AA_INT_ACK_KER 0x00000050
+#define	FRF_AA_INT_ACK_KER_FIELD_LBN 0
+#define	FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
+
+/* INT_ISR0_REG: Function 0 Interrupt Acknowledge Status register */
+#define	FR_BZ_INT_ISR0 0x00000090
+#define	FRF_BZ_INT_ISR_REG_LBN 0
+#define	FRF_BZ_INT_ISR_REG_WIDTH 64
+
+/* HW_INIT_REG: Hardware initialization register */
+#define	FR_AZ_HW_INIT 0x000000c0
+#define	FRF_BB_BDMRD_CPLF_FULL_LBN 124
+#define	FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
+#define	FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
+#define	FRF_CZ_TX_MRG_TAGS_LBN 120
+#define	FRF_CZ_TX_MRG_TAGS_WIDTH 1
+#define	FRF_AB_TRGT_MASK_ALL_LBN 100
+#define	FRF_AB_TRGT_MASK_ALL_WIDTH 1
+#define	FRF_AZ_DOORBELL_DROP_LBN 92
+#define	FRF_AZ_DOORBELL_DROP_WIDTH 8
+#define	FRF_AB_TX_RREQ_MASK_EN_LBN 76
+#define	FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
+#define	FRF_AB_PE_EIDLE_DIS_LBN 75
+#define	FRF_AB_PE_EIDLE_DIS_WIDTH 1
+#define	FRF_AA_FC_BLOCKING_EN_LBN 45
+#define	FRF_AA_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_BZ_B2B_REQ_EN_LBN 45
+#define	FRF_BZ_B2B_REQ_EN_WIDTH 1
+#define	FRF_AA_B2B_REQ_EN_LBN 44
+#define	FRF_AA_B2B_REQ_EN_WIDTH 1
+#define	FRF_BB_FC_BLOCKING_EN_LBN 44
+#define	FRF_BB_FC_BLOCKING_EN_WIDTH 1
+#define	FRF_AZ_POST_WR_MASK_LBN 40
+#define	FRF_AZ_POST_WR_MASK_WIDTH 4
+#define	FRF_AZ_TLP_TC_LBN 34
+#define	FRF_AZ_TLP_TC_WIDTH 3
+#define	FRF_AZ_TLP_ATTR_LBN 32
+#define	FRF_AZ_TLP_ATTR_WIDTH 2
+#define	FRF_AB_INTB_VEC_LBN 24
+#define	FRF_AB_INTB_VEC_WIDTH 5
+#define	FRF_AB_INTA_VEC_LBN 16
+#define	FRF_AB_INTA_VEC_WIDTH 5
+#define	FRF_AZ_WD_TIMER_LBN 8
+#define	FRF_AZ_WD_TIMER_WIDTH 8
+#define	FRF_AZ_US_DISABLE_LBN 5
+#define	FRF_AZ_US_DISABLE_WIDTH 1
+#define	FRF_AZ_TLP_EP_LBN 4
+#define	FRF_AZ_TLP_EP_WIDTH 1
+#define	FRF_AZ_ATTR_SEL_LBN 3
+#define	FRF_AZ_ATTR_SEL_WIDTH 1
+#define	FRF_AZ_TD_SEL_LBN 1
+#define	FRF_AZ_TD_SEL_WIDTH 1
+#define	FRF_AZ_TLP_TD_LBN 0
+#define	FRF_AZ_TLP_TD_WIDTH 1
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+#define	FR_AB_EE_SPI_HCMD 0x00000100
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
+#define	FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
+#define	FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
+#define	FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
+#define	FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
+#define	FRF_AB_EE_SPI_HCMD_READ_LBN 15
+#define	FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
+#define	FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
+#define	FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define	FRF_AB_EE_SPI_HCMD_ENC_LBN 0
+#define	FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
+
+/* USR_EV_CFG: User Level Event Configuration register */
+#define	FR_CZ_USR_EV_CFG 0x00000100
+#define	FRF_CZ_USREV_DIS_LBN 16
+#define	FRF_CZ_USREV_DIS_WIDTH 1
+#define	FRF_CZ_DFLT_EVQ_LBN 0
+#define	FRF_CZ_DFLT_EVQ_WIDTH 10
+
+/* EE_SPI_HADR_REG: SPI host address register */
+#define	FR_AB_EE_SPI_HADR 0x00000110
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
+#define	FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
+#define	FRF_AB_EE_SPI_HADR_ADR_LBN 0
+#define	FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
+
+/* EE_SPI_HDATA_REG: SPI host data register */
+#define	FR_AB_EE_SPI_HDATA 0x00000120
+#define	FRF_AB_EE_SPI_HDATA3_LBN 96
+#define	FRF_AB_EE_SPI_HDATA3_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA2_LBN 64
+#define	FRF_AB_EE_SPI_HDATA2_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA1_LBN 32
+#define	FRF_AB_EE_SPI_HDATA1_WIDTH 32
+#define	FRF_AB_EE_SPI_HDATA0_LBN 0
+#define	FRF_AB_EE_SPI_HDATA0_WIDTH 32
+
+/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
+#define	FR_AB_EE_BASE_PAGE 0x00000130
+#define	FRF_AB_EE_EXPROM_MASK_LBN 16
+#define	FRF_AB_EE_EXPROM_MASK_WIDTH 13
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
+#define	FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
+
+/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
+#define	FR_AB_EE_VPD_CFG0 0x00000140
+#define	FRF_AB_EE_SF_FASTRD_EN_LBN 127
+#define	FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
+#define	FRF_AB_EE_SF_CLOCK_DIV_LBN 120
+#define	FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_VPD_WIP_POLL_LBN 119
+#define	FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
+#define	FRF_AB_EE_EE_CLOCK_DIV_LBN 112
+#define	FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
+#define	FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
+#define	FRF_AB_EE_VPDW_LENGTH_LBN 80
+#define	FRF_AB_EE_VPDW_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPDW_BASE_LBN 64
+#define	FRF_AB_EE_VPDW_BASE_WIDTH 15
+#define	FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
+#define	FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
+#define	FRF_AB_EE_VPD_BASE_LBN 32
+#define	FRF_AB_EE_VPD_BASE_WIDTH 24
+#define	FRF_AB_EE_VPD_LENGTH_LBN 16
+#define	FRF_AB_EE_VPD_LENGTH_WIDTH 15
+#define	FRF_AB_EE_VPD_AD_SIZE_LBN 8
+#define	FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_LBN 5
+#define	FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
+#define	FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
+#define	FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
+#define	FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
+#define	FRF_AB_EE_VPD_EN_LBN 0
+#define	FRF_AB_EE_VPD_EN_WIDTH 1
+
+/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
+#define	FR_AB_EE_VPD_SW_CNTL 0x00000150
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
+#define	FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_WRITE_LBN 28
+#define	FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
+#define	FRF_AB_EE_VPD_CYC_ADR_LBN 0
+#define	FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
+
+/* EE_VPD_SW_DATA_REG: VPD access SW data register */
+#define	FR_AB_EE_VPD_SW_DATA 0x00000160
+#define	FRF_AB_EE_VPD_CYC_DAT_LBN 0
+#define	FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
+
+/* PBMX_DBG_IADDR_REG: Capture Module address register */
+#define	FR_CZ_PBMX_DBG_IADDR 0x000001f0
+#define	FRF_CZ_PBMX_DBG_IADDR_LBN 0
+#define	FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
+
+/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
+#define	FR_BB_PCIE_CORE_INDIRECT 0x000001f0
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
+#define	FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
+#define	FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
+#define	FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
+
+/* PBMX_DBG_IDATA_REG: Capture Module data register */
+#define	FR_CZ_PBMX_DBG_IDATA 0x000001f8
+#define	FRF_CZ_PBMX_DBG_IDATA_LBN 0
+#define	FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
+
+/* NIC_STAT_REG: NIC status register */
+#define	FR_AB_NIC_STAT 0x00000200
+#define	FRF_BB_AER_DIS_LBN 34
+#define	FRF_BB_AER_DIS_WIDTH 1
+#define	FRF_BB_EE_STRAP_EN_LBN 31
+#define	FRF_BB_EE_STRAP_EN_WIDTH 1
+#define	FRF_BB_EE_STRAP_LBN 24
+#define	FRF_BB_EE_STRAP_WIDTH 4
+#define	FRF_BB_REVISION_ID_LBN 17
+#define	FRF_BB_REVISION_ID_WIDTH 7
+#define	FRF_AB_ONCHIP_SRAM_LBN 16
+#define	FRF_AB_ONCHIP_SRAM_WIDTH 1
+#define	FRF_AB_SF_PRST_LBN 9
+#define	FRF_AB_SF_PRST_WIDTH 1
+#define	FRF_AB_EE_PRST_LBN 8
+#define	FRF_AB_EE_PRST_WIDTH 1
+#define	FRF_AB_ATE_MODE_LBN 3
+#define	FRF_AB_ATE_MODE_WIDTH 1
+#define	FRF_AB_STRAP_PINS_LBN 0
+#define	FRF_AB_STRAP_PINS_WIDTH 3
+
+/* GPIO_CTL_REG: GPIO control register */
+#define	FR_AB_GPIO_CTL 0x00000210
+#define	FRF_AB_GPIO_OUT3_LBN 112
+#define	FRF_AB_GPIO_OUT3_WIDTH 16
+#define	FRF_AB_GPIO_IN3_LBN 104
+#define	FRF_AB_GPIO_IN3_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
+#define	FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
+#define	FRF_AB_GPIO_OUT2_LBN 80
+#define	FRF_AB_GPIO_OUT2_WIDTH 16
+#define	FRF_AB_GPIO_IN2_LBN 72
+#define	FRF_AB_GPIO_IN2_WIDTH 8
+#define	FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
+#define	FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
+#define	FRF_AB_GPIO15_OEN_LBN 63
+#define	FRF_AB_GPIO15_OEN_WIDTH 1
+#define	FRF_AB_GPIO14_OEN_LBN 62
+#define	FRF_AB_GPIO14_OEN_WIDTH 1
+#define	FRF_AB_GPIO13_OEN_LBN 61
+#define	FRF_AB_GPIO13_OEN_WIDTH 1
+#define	FRF_AB_GPIO12_OEN_LBN 60
+#define	FRF_AB_GPIO12_OEN_WIDTH 1
+#define	FRF_AB_GPIO11_OEN_LBN 59
+#define	FRF_AB_GPIO11_OEN_WIDTH 1
+#define	FRF_AB_GPIO10_OEN_LBN 58
+#define	FRF_AB_GPIO10_OEN_WIDTH 1
+#define	FRF_AB_GPIO9_OEN_LBN 57
+#define	FRF_AB_GPIO9_OEN_WIDTH 1
+#define	FRF_AB_GPIO8_OEN_LBN 56
+#define	FRF_AB_GPIO8_OEN_WIDTH 1
+#define	FRF_AB_GPIO15_OUT_LBN 55
+#define	FRF_AB_GPIO15_OUT_WIDTH 1
+#define	FRF_AB_GPIO14_OUT_LBN 54
+#define	FRF_AB_GPIO14_OUT_WIDTH 1
+#define	FRF_AB_GPIO13_OUT_LBN 53
+#define	FRF_AB_GPIO13_OUT_WIDTH 1
+#define	FRF_AB_GPIO12_OUT_LBN 52
+#define	FRF_AB_GPIO12_OUT_WIDTH 1
+#define	FRF_AB_GPIO11_OUT_LBN 51
+#define	FRF_AB_GPIO11_OUT_WIDTH 1
+#define	FRF_AB_GPIO10_OUT_LBN 50
+#define	FRF_AB_GPIO10_OUT_WIDTH 1
+#define	FRF_AB_GPIO9_OUT_LBN 49
+#define	FRF_AB_GPIO9_OUT_WIDTH 1
+#define	FRF_AB_GPIO8_OUT_LBN 48
+#define	FRF_AB_GPIO8_OUT_WIDTH 1
+#define	FRF_AB_GPIO15_IN_LBN 47
+#define	FRF_AB_GPIO15_IN_WIDTH 1
+#define	FRF_AB_GPIO14_IN_LBN 46
+#define	FRF_AB_GPIO14_IN_WIDTH 1
+#define	FRF_AB_GPIO13_IN_LBN 45
+#define	FRF_AB_GPIO13_IN_WIDTH 1
+#define	FRF_AB_GPIO12_IN_LBN 44
+#define	FRF_AB_GPIO12_IN_WIDTH 1
+#define	FRF_AB_GPIO11_IN_LBN 43
+#define	FRF_AB_GPIO11_IN_WIDTH 1
+#define	FRF_AB_GPIO10_IN_LBN 42
+#define	FRF_AB_GPIO10_IN_WIDTH 1
+#define	FRF_AB_GPIO9_IN_LBN 41
+#define	FRF_AB_GPIO9_IN_WIDTH 1
+#define	FRF_AB_GPIO8_IN_LBN 40
+#define	FRF_AB_GPIO8_IN_WIDTH 1
+#define	FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
+#define	FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
+#define	FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
+#define	FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
+#define	FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
+#define	FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
+#define	FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
+#define	FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
+#define	FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_CLK156_OUT_EN_LBN 31
+#define	FRF_AB_CLK156_OUT_EN_WIDTH 1
+#define	FRF_AB_USE_NIC_CLK_LBN 30
+#define	FRF_AB_USE_NIC_CLK_WIDTH 1
+#define	FRF_AB_GPIO5_OEN_LBN 29
+#define	FRF_AB_GPIO5_OEN_WIDTH 1
+#define	FRF_AB_GPIO4_OEN_LBN 28
+#define	FRF_AB_GPIO4_OEN_WIDTH 1
+#define	FRF_AB_GPIO3_OEN_LBN 27
+#define	FRF_AB_GPIO3_OEN_WIDTH 1
+#define	FRF_AB_GPIO2_OEN_LBN 26
+#define	FRF_AB_GPIO2_OEN_WIDTH 1
+#define	FRF_AB_GPIO1_OEN_LBN 25
+#define	FRF_AB_GPIO1_OEN_WIDTH 1
+#define	FRF_AB_GPIO0_OEN_LBN 24
+#define	FRF_AB_GPIO0_OEN_WIDTH 1
+#define	FRF_AB_GPIO7_OUT_LBN 23
+#define	FRF_AB_GPIO7_OUT_WIDTH 1
+#define	FRF_AB_GPIO6_OUT_LBN 22
+#define	FRF_AB_GPIO6_OUT_WIDTH 1
+#define	FRF_AB_GPIO5_OUT_LBN 21
+#define	FRF_AB_GPIO5_OUT_WIDTH 1
+#define	FRF_AB_GPIO4_OUT_LBN 20
+#define	FRF_AB_GPIO4_OUT_WIDTH 1
+#define	FRF_AB_GPIO3_OUT_LBN 19
+#define	FRF_AB_GPIO3_OUT_WIDTH 1
+#define	FRF_AB_GPIO2_OUT_LBN 18
+#define	FRF_AB_GPIO2_OUT_WIDTH 1
+#define	FRF_AB_GPIO1_OUT_LBN 17
+#define	FRF_AB_GPIO1_OUT_WIDTH 1
+#define	FRF_AB_GPIO0_OUT_LBN 16
+#define	FRF_AB_GPIO0_OUT_WIDTH 1
+#define	FRF_AB_GPIO7_IN_LBN 15
+#define	FRF_AB_GPIO7_IN_WIDTH 1
+#define	FRF_AB_GPIO6_IN_LBN 14
+#define	FRF_AB_GPIO6_IN_WIDTH 1
+#define	FRF_AB_GPIO5_IN_LBN 13
+#define	FRF_AB_GPIO5_IN_WIDTH 1
+#define	FRF_AB_GPIO4_IN_LBN 12
+#define	FRF_AB_GPIO4_IN_WIDTH 1
+#define	FRF_AB_GPIO3_IN_LBN 11
+#define	FRF_AB_GPIO3_IN_WIDTH 1
+#define	FRF_AB_GPIO2_IN_LBN 10
+#define	FRF_AB_GPIO2_IN_WIDTH 1
+#define	FRF_AB_GPIO1_IN_LBN 9
+#define	FRF_AB_GPIO1_IN_WIDTH 1
+#define	FRF_AB_GPIO0_IN_LBN 8
+#define	FRF_AB_GPIO0_IN_WIDTH 1
+#define	FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
+#define	FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
+#define	FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
+#define	FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
+#define	FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
+#define	FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
+#define	FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
+#define	FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
+#define	FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
+#define	FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
+
+/* GLB_CTL_REG: Global control register */
+#define	FR_AB_GLB_CTL 0x00000220
+#define	FRF_AB_EXT_PHY_RST_CTL_LBN 63
+#define	FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
+#define	FRF_AB_XAUI_SD_RST_CTL_LBN 62
+#define	FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_SD_RST_CTL_LBN 61
+#define	FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
+#define	FRF_AA_PCIX_RST_CTL_LBN 60
+#define	FRF_AA_PCIX_RST_CTL_WIDTH 1
+#define	FRF_BB_BIU_RST_CTL_LBN 60
+#define	FRF_BB_BIU_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_STKY_RST_CTL_LBN 59
+#define	FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
+#define	FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
+#define	FRF_AB_PCIE_CORE_RST_CTL_LBN 57
+#define	FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
+#define	FRF_AB_XGRX_RST_CTL_LBN 56
+#define	FRF_AB_XGRX_RST_CTL_WIDTH 1
+#define	FRF_AB_XGTX_RST_CTL_LBN 55
+#define	FRF_AB_XGTX_RST_CTL_WIDTH 1
+#define	FRF_AB_EM_RST_CTL_LBN 54
+#define	FRF_AB_EM_RST_CTL_WIDTH 1
+#define	FRF_AB_EV_RST_CTL_LBN 53
+#define	FRF_AB_EV_RST_CTL_WIDTH 1
+#define	FRF_AB_SR_RST_CTL_LBN 52
+#define	FRF_AB_SR_RST_CTL_WIDTH 1
+#define	FRF_AB_RX_RST_CTL_LBN 51
+#define	FRF_AB_RX_RST_CTL_WIDTH 1
+#define	FRF_AB_TX_RST_CTL_LBN 50
+#define	FRF_AB_TX_RST_CTL_WIDTH 1
+#define	FRF_AB_EE_RST_CTL_LBN 49
+#define	FRF_AB_EE_RST_CTL_WIDTH 1
+#define	FRF_AB_CS_RST_CTL_LBN 48
+#define	FRF_AB_CS_RST_CTL_WIDTH 1
+#define	FRF_AB_HOT_RST_CTL_LBN 40
+#define	FRF_AB_HOT_RST_CTL_WIDTH 2
+#define	FRF_AB_RST_EXT_PHY_LBN 31
+#define	FRF_AB_RST_EXT_PHY_WIDTH 1
+#define	FRF_AB_RST_XAUI_SD_LBN 30
+#define	FRF_AB_RST_XAUI_SD_WIDTH 1
+#define	FRF_AB_RST_PCIE_SD_LBN 29
+#define	FRF_AB_RST_PCIE_SD_WIDTH 1
+#define	FRF_AA_RST_PCIX_LBN 28
+#define	FRF_AA_RST_PCIX_WIDTH 1
+#define	FRF_BB_RST_BIU_LBN 28
+#define	FRF_BB_RST_BIU_WIDTH 1
+#define	FRF_AB_RST_PCIE_STKY_LBN 27
+#define	FRF_AB_RST_PCIE_STKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_NSTKY_LBN 26
+#define	FRF_AB_RST_PCIE_NSTKY_WIDTH 1
+#define	FRF_AB_RST_PCIE_CORE_LBN 25
+#define	FRF_AB_RST_PCIE_CORE_WIDTH 1
+#define	FRF_AB_RST_XGRX_LBN 24
+#define	FRF_AB_RST_XGRX_WIDTH 1
+#define	FRF_AB_RST_XGTX_LBN 23
+#define	FRF_AB_RST_XGTX_WIDTH 1
+#define	FRF_AB_RST_EM_LBN 22
+#define	FRF_AB_RST_EM_WIDTH 1
+#define	FRF_AB_RST_EV_LBN 21
+#define	FRF_AB_RST_EV_WIDTH 1
+#define	FRF_AB_RST_SR_LBN 20
+#define	FRF_AB_RST_SR_WIDTH 1
+#define	FRF_AB_RST_RX_LBN 19
+#define	FRF_AB_RST_RX_WIDTH 1
+#define	FRF_AB_RST_TX_LBN 18
+#define	FRF_AB_RST_TX_WIDTH 1
+#define	FRF_AB_RST_SF_LBN 17
+#define	FRF_AB_RST_SF_WIDTH 1
+#define	FRF_AB_RST_CS_LBN 16
+#define	FRF_AB_RST_CS_WIDTH 1
+#define	FRF_AB_INT_RST_DUR_LBN 4
+#define	FRF_AB_INT_RST_DUR_WIDTH 3
+#define	FRF_AB_EXT_PHY_RST_DUR_LBN 1
+#define	FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
+#define	FFE_AB_EXT_PHY_RST_DUR_10240US 7
+#define	FFE_AB_EXT_PHY_RST_DUR_5120US 6
+#define	FFE_AB_EXT_PHY_RST_DUR_2560US 5
+#define	FFE_AB_EXT_PHY_RST_DUR_1280US 4
+#define	FFE_AB_EXT_PHY_RST_DUR_640US 3
+#define	FFE_AB_EXT_PHY_RST_DUR_320US 2
+#define	FFE_AB_EXT_PHY_RST_DUR_160US 1
+#define	FFE_AB_EXT_PHY_RST_DUR_80US 0
+#define	FRF_AB_SWRST_LBN 0
+#define	FRF_AB_SWRST_WIDTH 1
+
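+/*
+ * Illustrative sketch (example only, not part of the register map):
+ * each FR_* constant is a register address, each FRF_*_LBN/_WIDTH pair
+ * locates a field within that register, and FFE_* constants are field
+ * encodings.  Assuming the driver's bitfield/IO helpers
+ * (EFX_POPULATE_OWORD_*, efx_writeo), a write to GLB_CTL could be
+ * built up as below.
+ */
+#if 0	/* example only, compiled out */
+static inline void example_glb_ctl_phy_reset(struct efx_nic *efx)
+{
+	efx_oword_t glb_ctl;
+
+	/* Request an external PHY reset with the longest duration code */
+	EFX_POPULATE_OWORD_2(glb_ctl,
+			     FRF_AB_RST_EXT_PHY, 1,
+			     FRF_AB_EXT_PHY_RST_DUR,
+			     FFE_AB_EXT_PHY_RST_DUR_10240US);
+	efx_writeo(efx, &glb_ctl, FR_AB_GLB_CTL);
+}
+#endif
+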
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define	FR_AZ_FATAL_INTR_KER 0x00000230
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
+#define	FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
+#define	FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
+#define	FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
+#define	FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
+#define	FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
+#define	FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
+#define	FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
+#define	FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
+#define	FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
+#define	FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
+#define	FRF_AB_PCI_BUSERR_INT_KER_LBN 11
+#define	FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_KER_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRAM_OOB_INT_KER_LBN 10
+#define	FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
+#define	FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
+#define	FRF_AZ_MEM_PERR_INT_KER_LBN 8
+#define	FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
+#define	FRF_AZ_RBUF_OWN_INT_KER_LBN 7
+#define	FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TBUF_OWN_INT_KER_LBN 6
+#define	FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
+#define	FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
+#define	FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVQ_OWN_INT_KER_LBN 3
+#define	FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
+#define	FRF_AZ_EVF_OFLO_INT_KER_LBN 2
+#define	FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
+#define	FRF_AZ_ILL_ADR_INT_KER_LBN 1
+#define	FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
+#define	FRF_AZ_SRM_PERR_INT_KER_LBN 0
+#define	FRF_AZ_SRM_PERR_INT_KER_WIDTH 1
+
+/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
+#define	FR_BZ_FATAL_INTR_CHAR 0x00000240
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
+#define	FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
+#define	FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
+#define	FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
+#define	FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
+#define	FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
+#define	FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
+#define	FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
+#define	FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
+#define	FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
+#define	FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
+#define	FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
+#define	FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
+#define	FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
+#define	FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
+#define	FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
+#define	FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
+#define	FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
+#define	FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
+#define	FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
+#define	FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
+#define	FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
+#define	FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1
+
+/* DP_CTRL_REG: Datapath control register */
+#define	FR_BZ_DP_CTRL 0x00000250
+#define	FRF_BZ_FLS_EVQ_ID_LBN 0
+#define	FRF_BZ_FLS_EVQ_ID_WIDTH 12
+
+/* MEM_STAT_REG: Memory status register */
+#define	FR_AZ_MEM_STAT 0x00000260
+#define	FRF_AB_MEM_PERR_VEC_LBN 53
+#define	FRF_AB_MEM_PERR_VEC_WIDTH 38
+#define	FRF_AB_MBIST_CORR_LBN 38
+#define	FRF_AB_MBIST_CORR_WIDTH 15
+#define	FRF_AB_MBIST_ERR_LBN 0
+#define	FRF_AB_MBIST_ERR_WIDTH 40
+#define	FRF_CZ_MEM_PERR_VEC_LBN 0
+#define	FRF_CZ_MEM_PERR_VEC_WIDTH 35
+
+/* CS_DEBUG_REG: Debug register */
+#define	FR_AZ_CS_DEBUG 0x00000270
+#define	FRF_AB_GLB_DEBUG2_SEL_LBN 50
+#define	FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL2_LBN 47
+#define	FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL1_LBN 44
+#define	FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
+#define	FRF_AB_DEBUG_BLK_SEL0_LBN 41
+#define	FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
+#define	FRF_CZ_CS_PORT_NUM_LBN 40
+#define	FRF_CZ_CS_PORT_NUM_WIDTH 2
+#define	FRF_AB_MISC_DEBUG_ADDR_LBN 36
+#define	FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SERDES_DEBUG_ADDR_LBN 31
+#define	FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
+#define	FRF_CZ_CS_PORT_FPE_LBN 1
+#define	FRF_CZ_CS_PORT_FPE_WIDTH 35
+#define	FRF_AB_EM_DEBUG_ADDR_LBN 26
+#define	FRF_AB_EM_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_SR_DEBUG_ADDR_LBN 21
+#define	FRF_AB_SR_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_EV_DEBUG_ADDR_LBN 16
+#define	FRF_AB_EV_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_RX_DEBUG_ADDR_LBN 11
+#define	FRF_AB_RX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_TX_DEBUG_ADDR_LBN 6
+#define	FRF_AB_TX_DEBUG_ADDR_WIDTH 5
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
+#define	FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
+#define	FRF_AZ_CS_DEBUG_EN_LBN 0
+#define	FRF_AZ_CS_DEBUG_EN_WIDTH 1
+
+/* DRIVER_REG: Driver scratch register [0-7] */
+#define	FR_AZ_DRIVER 0x00000280
+#define	FR_AZ_DRIVER_STEP 16
+#define	FR_AZ_DRIVER_ROWS 8
+#define	FRF_AZ_DRIVER_DW0_LBN 0
+#define	FRF_AZ_DRIVER_DW0_WIDTH 32
+
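+/*
+ * Illustrative sketch (example only): registers that also define _STEP
+ * and _ROWS constants are repeated tables, with row i located at
+ * FR_x + i * FR_x_STEP.  Assuming the driver's efx_writeo_table()
+ * helper, writing driver scratch row 3 could look like this.
+ */
+#if 0	/* example only, compiled out */
+static inline void example_write_driver_scratch(struct efx_nic *efx, u32 val)
+{
+	efx_oword_t reg;
+
+	EFX_POPULATE_OWORD_1(reg, FRF_AZ_DRIVER_DW0, val);
+	efx_writeo_table(efx, &reg, FR_AZ_DRIVER, 3);	/* 3 < FR_AZ_DRIVER_ROWS */
+}
+#endif
+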
+/* ALTERA_BUILD_REG: Altera build register */
+#define	FR_AZ_ALTERA_BUILD 0x00000300
+#define	FRF_AZ_ALTERA_BUILD_VER_LBN 0
+#define	FRF_AZ_ALTERA_BUILD_VER_WIDTH 32
+
+/* CSR_SPARE_REG: Spare register */
+#define	FR_AZ_CSR_SPARE 0x00000310
+#define	FRF_AB_MEM_PERR_EN_LBN 64
+#define	FRF_AB_MEM_PERR_EN_WIDTH 38
+#define	FRF_CZ_MEM_PERR_EN_LBN 64
+#define	FRF_CZ_MEM_PERR_EN_WIDTH 35
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
+#define	FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
+#define	FRF_AZ_CSR_SPARE_BITS_LBN 0
+#define	FRF_AZ_CSR_SPARE_BITS_WIDTH 32
+
+/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
+#define	FR_AB_PCIE_SD_CTL0123 0x00000320
+#define	FRF_AB_PCIE_TESTSIG_H_LBN 96
+#define	FRF_AB_PCIE_TESTSIG_H_WIDTH 19
+#define	FRF_AB_PCIE_TESTSIG_L_LBN 64
+#define	FRF_AB_PCIE_TESTSIG_L_WIDTH 19
+#define	FRF_AB_PCIE_OFFSET_LBN 56
+#define	FRF_AB_PCIE_OFFSET_WIDTH 8
+#define	FRF_AB_PCIE_OFFSETEN_H_LBN 55
+#define	FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
+#define	FRF_AB_PCIE_OFFSETEN_L_LBN 54
+#define	FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_H_LBN 53
+#define	FRF_AB_PCIE_HIVMODE_H_WIDTH 1
+#define	FRF_AB_PCIE_HIVMODE_L_LBN 52
+#define	FRF_AB_PCIE_HIVMODE_L_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_H_LBN 51
+#define	FRF_AB_PCIE_PARRESET_H_WIDTH 1
+#define	FRF_AB_PCIE_PARRESET_L_LBN 50
+#define	FRF_AB_PCIE_PARRESET_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_H_LBN 49
+#define	FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
+#define	FRF_AB_PCIE_LPBKWDRV_L_LBN 48
+#define	FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
+#define	FRF_AB_PCIE_LPBK_LBN 40
+#define	FRF_AB_PCIE_LPBK_WIDTH 8
+#define	FRF_AB_PCIE_PARLPBK_LBN 32
+#define	FRF_AB_PCIE_PARLPBK_WIDTH 8
+#define	FRF_AB_PCIE_RXTERMADJ_H_LBN 30
+#define	FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_RXTERMADJ_L_LBN 28
+#define	FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_RXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_TXTERMADJ_H_LBN 26
+#define	FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
+#define	FRF_AB_PCIE_TXTERMADJ_L_LBN 24
+#define	FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
+#define	FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
+#define	FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
+#define	FFE_AB_PCIE_TXTERMADJ_NOMNL 0
+#define	FRF_AB_PCIE_RXEQCTL_H_LBN 18
+#define	FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
+#define	FRF_AB_PCIE_RXEQCTL_L_LBN 16
+#define	FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
+#define	FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
+#define	FFE_AB_PCIE_RXEQCTL_OFF 2
+#define	FFE_AB_PCIE_RXEQCTL_MIN 1
+#define	FFE_AB_PCIE_RXEQCTL_MAX 0
+#define	FRF_AB_PCIE_HIDRV_LBN 8
+#define	FRF_AB_PCIE_HIDRV_WIDTH 8
+#define	FRF_AB_PCIE_LODRV_LBN 0
+#define	FRF_AB_PCIE_LODRV_WIDTH 8
+
+/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
+#define	FR_AB_PCIE_SD_CTL45 0x00000330
+#define	FRF_AB_PCIE_DTX7_LBN 60
+#define	FRF_AB_PCIE_DTX7_WIDTH 4
+#define	FRF_AB_PCIE_DTX6_LBN 56
+#define	FRF_AB_PCIE_DTX6_WIDTH 4
+#define	FRF_AB_PCIE_DTX5_LBN 52
+#define	FRF_AB_PCIE_DTX5_WIDTH 4
+#define	FRF_AB_PCIE_DTX4_LBN 48
+#define	FRF_AB_PCIE_DTX4_WIDTH 4
+#define	FRF_AB_PCIE_DTX3_LBN 44
+#define	FRF_AB_PCIE_DTX3_WIDTH 4
+#define	FRF_AB_PCIE_DTX2_LBN 40
+#define	FRF_AB_PCIE_DTX2_WIDTH 4
+#define	FRF_AB_PCIE_DTX1_LBN 36
+#define	FRF_AB_PCIE_DTX1_WIDTH 4
+#define	FRF_AB_PCIE_DTX0_LBN 32
+#define	FRF_AB_PCIE_DTX0_WIDTH 4
+#define	FRF_AB_PCIE_DEQ7_LBN 28
+#define	FRF_AB_PCIE_DEQ7_WIDTH 4
+#define	FRF_AB_PCIE_DEQ6_LBN 24
+#define	FRF_AB_PCIE_DEQ6_WIDTH 4
+#define	FRF_AB_PCIE_DEQ5_LBN 20
+#define	FRF_AB_PCIE_DEQ5_WIDTH 4
+#define	FRF_AB_PCIE_DEQ4_LBN 16
+#define	FRF_AB_PCIE_DEQ4_WIDTH 4
+#define	FRF_AB_PCIE_DEQ3_LBN 12
+#define	FRF_AB_PCIE_DEQ3_WIDTH 4
+#define	FRF_AB_PCIE_DEQ2_LBN 8
+#define	FRF_AB_PCIE_DEQ2_WIDTH 4
+#define	FRF_AB_PCIE_DEQ1_LBN 4
+#define	FRF_AB_PCIE_DEQ1_WIDTH 4
+#define	FRF_AB_PCIE_DEQ0_LBN 0
+#define	FRF_AB_PCIE_DEQ0_WIDTH 4
+
+/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
+#define	FR_AB_PCIE_PCS_CTL_STAT 0x00000340
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
+#define	FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
+#define	FRF_AB_PCIE_PRBSERR_LBN 40
+#define	FRF_AB_PCIE_PRBSERR_WIDTH 8
+#define	FRF_AB_PCIE_PRBSERRH0_LBN 32
+#define	FRF_AB_PCIE_PRBSERRH0_WIDTH 8
+#define	FRF_AB_PCIE_FASTINIT_H_LBN 15
+#define	FRF_AB_PCIE_FASTINIT_H_WIDTH 1
+#define	FRF_AB_PCIE_FASTINIT_L_LBN 14
+#define	FRF_AB_PCIE_FASTINIT_L_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_H_LBN 13
+#define	FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
+#define	FRF_AB_PCIE_CTCDISABLE_L_LBN 12
+#define	FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_H_LBN 11
+#define	FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSYNC_L_LBN 10
+#define	FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_H_LBN 9
+#define	FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
+#define	FRF_AB_PCIE_PRBSERRACK_L_LBN 8
+#define	FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
+#define	FRF_AB_PCIE_PRBSSEL_LBN 0
+#define	FRF_AB_PCIE_PRBSSEL_WIDTH 8
+
+/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
+#define	FR_BB_DEBUG_DATA_OUT 0x00000350
+#define	FRF_BB_DEBUG2_PORT_LBN 25
+#define	FRF_BB_DEBUG2_PORT_WIDTH 15
+#define	FRF_BB_DEBUG1_PORT_LBN 0
+#define	FRF_BB_DEBUG1_PORT_WIDTH 25
+
+/* EVQ_RPTR_REGP0: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR_P0 0x00000400
+#define	FR_BZ_EVQ_RPTR_P0_STEP 8192
+#define	FR_BZ_EVQ_RPTR_P0_ROWS 1024
+/* EVQ_RPTR_REG_KER: Event queue read pointer register */
+#define	FR_AA_EVQ_RPTR_KER 0x00011b00
+#define	FR_AA_EVQ_RPTR_KER_STEP 4
+#define	FR_AA_EVQ_RPTR_KER_ROWS 4
+/* EVQ_RPTR_REG: Event queue read pointer register */
+#define	FR_BZ_EVQ_RPTR 0x00fa0000
+#define	FR_BZ_EVQ_RPTR_STEP 16
+#define	FR_BB_EVQ_RPTR_ROWS 4096
+#define	FR_CZ_EVQ_RPTR_ROWS 1024
+/* EVQ_RPTR_REGP123: Event queue read pointer register */
+#define	FR_BB_EVQ_RPTR_P123 0x01000400
+#define	FR_BB_EVQ_RPTR_P123_STEP 8192
+#define	FR_BB_EVQ_RPTR_P123_ROWS 3072
+#define	FRF_AZ_EVQ_RPTR_VLD_LBN 15
+#define	FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
+#define	FRF_AZ_EVQ_RPTR_LBN 0
+#define	FRF_AZ_EVQ_RPTR_WIDTH 15
+
+/* TIMER_COMMAND_REGP0: Timer Command Registers */
+#define	FR_BZ_TIMER_COMMAND_P0 0x00000420
+#define	FR_BZ_TIMER_COMMAND_P0_STEP 8192
+#define	FR_BZ_TIMER_COMMAND_P0_ROWS 1024
+/* TIMER_COMMAND_REG_KER: Timer Command Registers */
+#define	FR_AA_TIMER_COMMAND_KER 0x00000420
+#define	FR_AA_TIMER_COMMAND_KER_STEP 8192
+#define	FR_AA_TIMER_COMMAND_KER_ROWS 4
+/* TIMER_COMMAND_REGP123: Timer Command Registers */
+#define	FR_BB_TIMER_COMMAND_P123 0x01000420
+#define	FR_BB_TIMER_COMMAND_P123_STEP 8192
+#define	FR_BB_TIMER_COMMAND_P123_ROWS 3072
+#define	FRF_CZ_TC_TIMER_MODE_LBN 14
+#define	FRF_CZ_TC_TIMER_MODE_WIDTH 2
+#define	FRF_AB_TC_TIMER_MODE_LBN 12
+#define	FRF_AB_TC_TIMER_MODE_WIDTH 2
+#define	FRF_CZ_TC_TIMER_VAL_LBN 0
+#define	FRF_CZ_TC_TIMER_VAL_WIDTH 14
+#define	FRF_AB_TC_TIMER_VAL_LBN 0
+#define	FRF_AB_TC_TIMER_VAL_WIDTH 12
+
+/* DRV_EV_REG: Driver generated event register */
+#define	FR_AZ_DRV_EV 0x00000440
+#define	FRF_AZ_DRV_EV_QID_LBN 64
+#define	FRF_AZ_DRV_EV_QID_WIDTH 12
+#define	FRF_AZ_DRV_EV_DATA_LBN 0
+#define	FRF_AZ_DRV_EV_DATA_WIDTH 64
+
+/* EVQ_CTL_REG: Event queue control register */
+#define	FR_AZ_EVQ_CTL 0x00000450
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
+#define	FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
+#define	FRF_AZ_EVQ_OWNERR_CTL_LBN 14
+#define	FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
+#define	FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
+#define	FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
+#define	FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7
+
+/* EVQ_CNT1_REG: Event counter 1 register */
+#define	FR_AZ_EVQ_CNT1 0x00000460
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
+#define	FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
+#define	FRF_AZ_EVQ_CNT_TOBIU_LBN 100
+#define	FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
+#define	FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
+#define	FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20
+
+/* EVQ_CNT2_REG: Event counter 2 register */
+#define	FR_AZ_EVQ_CNT2 0x00000470
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
+#define	FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
+#define	FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_RDY_CNT_LBN 80
+#define	FRF_AZ_EVQ_RDY_CNT_WIDTH 4
+#define	FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
+#define	FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
+#define	FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
+#define	FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
+#define	FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
+#define	FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20
+
+/* USR_EV_REG: Event mailbox register */
+#define	FR_CZ_USR_EV 0x00000540
+#define	FR_CZ_USR_EV_STEP 8192
+#define	FR_CZ_USR_EV_ROWS 1024
+#define	FRF_CZ_USR_EV_DATA_LBN 0
+#define	FRF_CZ_USR_EV_DATA_WIDTH 32
+
+/* BUF_TBL_CFG_REG: Buffer table configuration register */
+#define	FR_AZ_BUF_TBL_CFG 0x00000600
+#define	FRF_AZ_BUF_TBL_MODE_LBN 3
+#define	FRF_AZ_BUF_TBL_MODE_WIDTH 1
+
+/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
+#define	FR_AZ_SRM_RX_DC_CFG 0x00000610
+#define	FRF_AZ_SRM_CLK_TMP_EN_LBN 21
+#define	FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
+#define	FR_AZ_SRM_TX_DC_CFG 0x00000620
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
+#define	FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRM_CFG_REG: SRAM configuration register */
+#define	FR_AZ_SRM_CFG 0x00000630
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
+#define	FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
+#define	FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
+#define	FRF_AZ_SRM_INIT_EN_LBN 3
+#define	FRF_AZ_SRM_INIT_EN_WIDTH 1
+#define	FRF_AZ_SRM_NUM_BANK_LBN 2
+#define	FRF_AZ_SRM_NUM_BANK_WIDTH 1
+#define	FRF_AZ_SRM_BANK_SIZE_LBN 0
+#define	FRF_AZ_SRM_BANK_SIZE_WIDTH 2
+
+/* BUF_TBL_UPD_REG: Buffer table update register */
+#define	FR_AZ_BUF_TBL_UPD 0x00000650
+#define	FRF_AZ_BUF_UPD_CMD_LBN 63
+#define	FRF_AZ_BUF_UPD_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_CMD_LBN 62
+#define	FRF_AZ_BUF_CLR_CMD_WIDTH 1
+#define	FRF_AZ_BUF_CLR_END_ID_LBN 32
+#define	FRF_AZ_BUF_CLR_END_ID_WIDTH 20
+#define	FRF_AZ_BUF_CLR_START_ID_LBN 0
+#define	FRF_AZ_BUF_CLR_START_ID_WIDTH 20
+
+/* SRM_UPD_EVQ_REG: SRAM update event queue register */
+#define	FR_AZ_SRM_UPD_EVQ 0x00000660
+#define	FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
+#define	FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12
+
+/* SRAM_PARITY_REG: SRAM parity register. */
+#define	FR_AZ_SRAM_PARITY 0x00000670
+#define	FRF_CZ_BYPASS_ECC_LBN 3
+#define	FRF_CZ_BYPASS_ECC_WIDTH 1
+#define	FRF_CZ_SEC_INT_LBN 2
+#define	FRF_CZ_SEC_INT_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
+#define	FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
+#define	FRF_AB_FORCE_SRAM_PERR_LBN 0
+#define	FRF_AB_FORCE_SRAM_PERR_WIDTH 1
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
+#define	FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1
+
+/* RX_CFG_REG: Receive configuration register */
+#define	FR_AZ_RX_CFG 0x00000800
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
+#define	FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
+#define	FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
+#define	FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
+#define	FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
+#define	FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
+#define	FRF_CZ_RX_PRE_RFF_IPG_LBN 49
+#define	FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
+#define	FRF_BZ_RX_TCP_SUP_LBN 48
+#define	FRF_BZ_RX_TCP_SUP_WIDTH 1
+#define	FRF_BZ_RX_INGR_EN_LBN 47
+#define	FRF_BZ_RX_INGR_EN_WIDTH 1
+#define	FRF_BZ_RX_IP_HASH_LBN 46
+#define	FRF_BZ_RX_IP_HASH_WIDTH 1
+#define	FRF_BZ_RX_HASH_ALG_LBN 45
+#define	FRF_BZ_RX_HASH_ALG_WIDTH 1
+#define	FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
+#define	FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
+#define	FRF_BZ_RX_DESC_PUSH_EN_LBN 43
+#define	FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_BZ_RX_RDW_PATCH_EN_LBN 42
+#define	FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_BB_RX_PCI_BURST_SIZE_LBN 39
+#define	FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_OWNERR_CTL_LBN 38
+#define	FRF_BZ_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_BZ_RX_XON_TX_TH_LBN 33
+#define	FRF_BZ_RX_XON_TX_TH_WIDTH 5
+#define	FRF_AA_RX_DESC_PUSH_EN_LBN 35
+#define	FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
+#define	FRF_AA_RX_RDW_PATCH_EN_LBN 34
+#define	FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
+#define	FRF_AA_RX_PCI_BURST_SIZE_LBN 31
+#define	FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
+#define	FRF_BZ_RX_XOFF_TX_TH_LBN 28
+#define	FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_OWNERR_CTL_LBN 30
+#define	FRF_AA_RX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_RX_XON_TX_TH_LBN 25
+#define	FRF_AA_RX_XON_TX_TH_WIDTH 5
+#define	FRF_BZ_RX_USR_BUF_SIZE_LBN 19
+#define	FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_AA_RX_XOFF_TX_TH_LBN 20
+#define	FRF_AA_RX_XOFF_TX_TH_WIDTH 5
+#define	FRF_AA_RX_USR_BUF_SIZE_LBN 11
+#define	FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
+#define	FRF_BZ_RX_XON_MAC_TH_LBN 10
+#define	FRF_BZ_RX_XON_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XON_MAC_TH_LBN 6
+#define	FRF_AA_RX_XON_MAC_TH_WIDTH 5
+#define	FRF_BZ_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
+#define	FRF_AA_RX_XOFF_MAC_TH_LBN 1
+#define	FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
+#define	FRF_AZ_RX_XOFF_MAC_EN_LBN 0
+#define	FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1
+
+/* RX_FILTER_CTL_REG: Receive filter control registers */
+#define	FR_BZ_RX_FILTER_CTL 0x00000810
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
+#define	FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
+#define	FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
+#define	FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
+#define	FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
+#define	FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
+#define	FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
+#define	FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
+#define	FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
+#define	FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
+#define	FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
+#define	FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
+#define	FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_NUM_KER_LBN 24
+#define	FRF_BZ_NUM_KER_WIDTH 2
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
+#define	FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
+#define	FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
+#define	FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
+#define	FR_AZ_RX_FLUSH_DESCQ 0x00000820
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
+#define	FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_RX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+#define	FR_BZ_RX_DESC_UPD_P0 0x00000830
+#define	FR_BZ_RX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_RX_DESC_UPD_P0_ROWS 1024
+/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
+#define	FR_AA_RX_DESC_UPD_KER 0x00000830
+#define	FR_AA_RX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_RX_DESC_UPD_KER_ROWS 4
+/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
+#define	FR_BB_RX_DESC_UPD_P123 0x01000830
+#define	FR_BB_RX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_RX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_RX_DESC_WPTR_LBN 96
+#define	FRF_AZ_RX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_RX_DESC_LBN 0
+#define	FRF_AZ_RX_DESC_WIDTH 64
+
+/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
+#define	FR_AZ_RX_DC_CFG 0x00000840
+#define	FRF_AB_RX_MAX_PF_LBN 2
+#define	FRF_AB_RX_MAX_PF_WIDTH 2
+#define	FRF_AZ_RX_DC_SIZE_LBN 0
+#define	FRF_AZ_RX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DC_SIZE_64 3
+#define	FFE_AZ_RX_DC_SIZE_32 2
+#define	FFE_AZ_RX_DC_SIZE_16 1
+#define	FFE_AZ_RX_DC_SIZE_8 0
+
+/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
+#define	FR_AZ_RX_DC_PF_WM 0x00000850
+#define	FRF_AZ_RX_DC_PF_HWM_LBN 6
+#define	FRF_AZ_RX_DC_PF_HWM_WIDTH 6
+#define	FRF_AZ_RX_DC_PF_LWM_LBN 0
+#define	FRF_AZ_RX_DC_PF_LWM_WIDTH 6
+
+/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
+#define	FR_BZ_RX_RSS_TKEY 0x00000860
+#define	FRF_BZ_RX_RSS_TKEY_HI_LBN 64
+#define	FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
+#define	FRF_BZ_RX_RSS_TKEY_LO_LBN 0
+#define	FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64
+
+/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
+#define	FR_AZ_RX_NODESC_DROP 0x00000880
+#define	FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
+#define	FRF_AB_RX_NODESC_DROP_CNT_LBN 0
+#define	FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX_SELF_RST_REG: Receive self reset register */
+#define	FR_AA_RX_SELF_RST 0x00000890
+#define	FRF_AA_RX_ISCSI_DIS_LBN 17
+#define	FRF_AA_RX_ISCSI_DIS_WIDTH 1
+#define	FRF_AA_RX_SW_RST_REG_LBN 16
+#define	FRF_AA_RX_SW_RST_REG_WIDTH 1
+#define	FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
+#define	FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
+#define	FRF_AA_RX_SELF_RST_EN_LBN 8
+#define	FRF_AA_RX_SELF_RST_EN_WIDTH 1
+#define	FRF_AA_RX_MAX_PF_LAT_LBN 4
+#define	FRF_AA_RX_MAX_PF_LAT_WIDTH 4
+#define	FRF_AA_RX_MAX_LU_LAT_LBN 0
+#define	FRF_AA_RX_MAX_LU_LAT_WIDTH 4
+
+/* RX_DEBUG_REG: undocumented register */
+#define	FR_AZ_RX_DEBUG 0x000008a0
+#define	FRF_AZ_RX_DEBUG_LBN 0
+#define	FRF_AZ_RX_DEBUG_WIDTH 64
+
+/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
+#define	FR_AZ_RX_PUSH_DROP 0x000008b0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32
+
+/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128
+
+/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
+#define	FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128
+
+/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
+#define	FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
+#define	FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
+#define	FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
+#define	FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
+#define	FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64
+
+/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
+#define	FR_AZ_TX_FLUSH_DESCQ 0x00000a00
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
+#define	FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define	FRF_AZ_TX_FLUSH_DESCQ_LBN 0
+#define	FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define	FR_BZ_TX_DESC_UPD_P0 0x00000a10
+#define	FR_BZ_TX_DESC_UPD_P0_STEP 8192
+#define	FR_BZ_TX_DESC_UPD_P0_ROWS 1024
+/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
+#define	FR_AA_TX_DESC_UPD_KER 0x00000a10
+#define	FR_AA_TX_DESC_UPD_KER_STEP 8192
+#define	FR_AA_TX_DESC_UPD_KER_ROWS 8
+/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
+#define	FR_BB_TX_DESC_UPD_P123 0x01000a10
+#define	FR_BB_TX_DESC_UPD_P123_STEP 8192
+#define	FR_BB_TX_DESC_UPD_P123_ROWS 3072
+#define	FRF_AZ_TX_DESC_WPTR_LBN 96
+#define	FRF_AZ_TX_DESC_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
+#define	FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
+#define	FRF_AZ_TX_DESC_LBN 0
+#define	FRF_AZ_TX_DESC_WIDTH 95
+
+/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
+#define	FR_AZ_TX_DC_CFG 0x00000a20
+#define	FRF_AZ_TX_DC_SIZE_LBN 0
+#define	FRF_AZ_TX_DC_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DC_SIZE_32 2
+#define	FFE_AZ_TX_DC_SIZE_16 1
+#define	FFE_AZ_TX_DC_SIZE_8 0
+
+/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
+#define	FR_AA_TX_CHKSM_CFG 0x00000a30
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
+#define	FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
+#define	FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
+#define	FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32
+
+/* TX_CFG_REG: Transmit configuration register */
+#define	FR_AZ_TX_CFG 0x00000a50
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
+#define	FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
+#define	FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
+#define	FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
+#define	FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
+#define	FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
+#define	FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
+#define	FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
+#define	FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
+#define	FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
+#define	FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
+#define	FRF_CZ_TX_FILTER_EN_BIT_LBN 47
+#define	FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
+#define	FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
+#define	FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
+#define	FRF_AZ_TX_P1_PRI_EN_LBN 4
+#define	FRF_AZ_TX_P1_PRI_EN_WIDTH 1
+#define	FRF_AZ_TX_OWNERR_CTL_LBN 2
+#define	FRF_AZ_TX_OWNERR_CTL_WIDTH 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
+#define	FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_AZ_TX_IP_ID_REP_EN_LBN 0
+#define	FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1
+
+/* TX_PUSH_DROP_REG: Transmit push dropped register */
+#define	FR_AZ_TX_PUSH_DROP 0x00000a60
+#define	FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
+#define	FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32
+
+/* TX_RESERVED_REG: Transmit additional configuration register */
+#define	FR_AZ_TX_RESERVED 0x00000a80
+#define	FRF_AZ_TX_EVT_CNT_LBN 121
+#define	FRF_AZ_TX_EVT_CNT_WIDTH 7
+#define	FRF_AZ_TX_PREF_AGE_CNT_LBN 119
+#define	FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
+#define	FRF_AZ_TX_RD_COMP_TMR_LBN 96
+#define	FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
+#define	FRF_AZ_TX_PUSH_EN_LBN 89
+#define	FRF_AZ_TX_PUSH_EN_WIDTH 1
+#define	FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
+#define	FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
+#define	FRF_AZ_TX_D_FF_FULL_P0_LBN 85
+#define	FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAR_ST_P0_LBN 81
+#define	FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
+#define	FRF_AZ_TX_DMAQ_ST_LBN 78
+#define	FRF_AZ_TX_DMAQ_ST_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_LBN 64
+#define	FRF_AZ_TX_RX_SPACER_WIDTH 8
+#define	FRF_AZ_TX_DROP_ABORT_EN_LBN 60
+#define	FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
+#define	FRF_AZ_TX_SOFT_EVT_EN_LBN 59
+#define	FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
+#define	FRF_AZ_TX_PS_EVT_DIS_LBN 58
+#define	FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
+#define	FRF_AZ_TX_RX_SPACER_EN_LBN 57
+#define	FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
+#define	FRF_AZ_TX_XP_TIMER_LBN 52
+#define	FRF_AZ_TX_XP_TIMER_WIDTH 5
+#define	FRF_AZ_TX_PREF_SPACER_LBN 44
+#define	FRF_AZ_TX_PREF_SPACER_WIDTH 8
+#define	FRF_AZ_TX_PREF_WD_TMR_LBN 22
+#define	FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
+#define	FRF_AZ_TX_ONLY1TAG_LBN 21
+#define	FRF_AZ_TX_ONLY1TAG_WIDTH 1
+#define	FRF_AZ_TX_PREF_THRESHOLD_LBN 19
+#define	FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
+#define	FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
+#define	FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
+#define	FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
+#define	FRF_AA_TX_DMA_FF_THR_LBN 16
+#define	FRF_AA_TX_DMA_FF_THR_WIDTH 1
+#define	FRF_AZ_TX_DMA_SPACER_LBN 8
+#define	FRF_AZ_TX_DMA_SPACER_WIDTH 8
+#define	FRF_AA_TX_TCP_DIS_LBN 7
+#define	FRF_AA_TX_TCP_DIS_WIDTH 1
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
+#define	FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
+#define	FRF_AA_TX_IP_DIS_LBN 6
+#define	FRF_AA_TX_IP_DIS_WIDTH 1
+#define	FRF_AZ_TX_MAX_CPL_LBN 2
+#define	FRF_AZ_TX_MAX_CPL_WIDTH 2
+#define	FFE_AZ_TX_MAX_CPL_16 3
+#define	FFE_AZ_TX_MAX_CPL_8 2
+#define	FFE_AZ_TX_MAX_CPL_4 1
+#define	FFE_AZ_TX_MAX_CPL_NOLIMIT 0
+#define	FRF_AZ_TX_MAX_PREF_LBN 0
+#define	FRF_AZ_TX_MAX_PREF_WIDTH 2
+#define	FFE_AZ_TX_MAX_PREF_32 3
+#define	FFE_AZ_TX_MAX_PREF_16 2
+#define	FFE_AZ_TX_MAX_PREF_8 1
+#define	FFE_AZ_TX_MAX_PREF_OFF 0
+
+/* TX_PACE_REG: Transmit pace control register */
+#define	FR_BZ_TX_PACE 0x00000a90
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
+#define	FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_SB_AF_LBN 9
+#define	FRF_BZ_TX_PACE_SB_AF_WIDTH 10
+#define	FRF_BZ_TX_PACE_FB_BASE_LBN 5
+#define	FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
+#define	FRF_BZ_TX_PACE_BIN_TH_LBN 0
+#define	FRF_BZ_TX_PACE_BIN_TH_WIDTH 5
+
+/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
+#define	FR_BZ_TX_PACE_DROP_QID 0x00000aa0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
+#define	FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16
+
+/* TX_VLAN_REG: Transmit VLAN tag register */
+#define	FR_BB_TX_VLAN 0x00000ae0
+#define	FRF_BB_TX_VLAN_EN_LBN 127
+#define	FRF_BB_TX_VLAN_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
+#define	FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
+#define	FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN7_LBN 112
+#define	FRF_BB_TX_VLAN7_WIDTH 12
+#define	FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
+#define	FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
+#define	FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN6_LBN 96
+#define	FRF_BB_TX_VLAN6_WIDTH 12
+#define	FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
+#define	FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
+#define	FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN5_LBN 80
+#define	FRF_BB_TX_VLAN5_WIDTH 12
+#define	FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
+#define	FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
+#define	FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN4_LBN 64
+#define	FRF_BB_TX_VLAN4_WIDTH 12
+#define	FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
+#define	FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
+#define	FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN3_LBN 48
+#define	FRF_BB_TX_VLAN3_WIDTH 12
+#define	FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
+#define	FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
+#define	FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN2_LBN 32
+#define	FRF_BB_TX_VLAN2_WIDTH 12
+#define	FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
+#define	FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
+#define	FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN1_LBN 16
+#define	FRF_BB_TX_VLAN1_WIDTH 12
+#define	FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
+#define	FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
+#define	FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
+#define	FRF_BB_TX_VLAN0_LBN 0
+#define	FRF_BB_TX_VLAN0_WIDTH 12
+
+/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
+#define	FR_BZ_TX_IPFIL_PORTEN 0x00000af0
+#define	FRF_BZ_TX_MADR0_FIL_EN_LBN 64
+#define	FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
+#define	FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
+#define	FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
+#define	FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
+#define	FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
+#define	FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
+#define	FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
+#define	FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
+#define	FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
+#define	FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
+#define	FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
+#define	FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
+#define	FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
+#define	FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
+#define	FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
+#define	FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
+#define	FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
+#define	FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
+#define	FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
+#define	FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
+#define	FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
+#define	FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
+#define	FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
+#define	FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
+#define	FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
+#define	FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
+#define	FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
+#define	FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
+#define	FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
+#define	FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
+#define	FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
+#define	FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
+#define	FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
+#define	FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1
+
+/* TX_IPFIL_TBL: Transmit IP source address filter table */
+#define	FR_BB_TX_IPFIL_TBL 0x00000b00
+#define	FR_BB_TX_IPFIL_TBL_STEP 16
+#define	FR_BB_TX_IPFIL_TBL_ROWS 16
+#define	FRF_BB_TX_IPFIL_MASK_1_LBN 96
+#define	FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_1_LBN 64
+#define	FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
+#define	FRF_BB_TX_IPFIL_MASK_0_LBN 32
+#define	FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
+#define	FRF_BB_TX_IP_SRC_ADR_0_LBN 0
+#define	FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32
+
+/* MD_TXD_REG: PHY management transmit data register */
+#define	FR_AB_MD_TXD 0x00000c00
+#define	FRF_AB_MD_TXD_LBN 0
+#define	FRF_AB_MD_TXD_WIDTH 16
+
+/* MD_RXD_REG: PHY management receive data register */
+#define	FR_AB_MD_RXD 0x00000c10
+#define	FRF_AB_MD_RXD_LBN 0
+#define	FRF_AB_MD_RXD_WIDTH 16
+
+/* MD_CS_REG: PHY management configuration & status register */
+#define	FR_AB_MD_CS 0x00000c20
+#define	FRF_AB_MD_RD_EN_CMD_LBN 15
+#define	FRF_AB_MD_RD_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_WR_EN_CMD_LBN 14
+#define	FRF_AB_MD_WR_EN_CMD_WIDTH 1
+#define	FRF_AB_MD_ADDR_CMD_LBN 13
+#define	FRF_AB_MD_ADDR_CMD_WIDTH 1
+#define	FRF_AB_MD_PT_LBN 7
+#define	FRF_AB_MD_PT_WIDTH 3
+#define	FRF_AB_MD_PL_LBN 6
+#define	FRF_AB_MD_PL_WIDTH 1
+#define	FRF_AB_MD_INT_CLR_LBN 5
+#define	FRF_AB_MD_INT_CLR_WIDTH 1
+#define	FRF_AB_MD_GC_LBN 4
+#define	FRF_AB_MD_GC_WIDTH 1
+#define	FRF_AB_MD_PRSP_LBN 3
+#define	FRF_AB_MD_PRSP_WIDTH 1
+#define	FRF_AB_MD_RIC_LBN 2
+#define	FRF_AB_MD_RIC_WIDTH 1
+#define	FRF_AB_MD_RDC_LBN 1
+#define	FRF_AB_MD_RDC_WIDTH 1
+#define	FRF_AB_MD_WRC_LBN 0
+#define	FRF_AB_MD_WRC_WIDTH 1
+
+/* MD_PHY_ADR_REG: PHY management PHY address register */
+#define	FR_AB_MD_PHY_ADR 0x00000c30
+#define	FRF_AB_MD_PHY_ADR_LBN 0
+#define	FRF_AB_MD_PHY_ADR_WIDTH 16
+
+/* MD_ID_REG: PHY management ID register */
+#define	FR_AB_MD_ID 0x00000c40
+#define	FRF_AB_MD_PRT_ADR_LBN 11
+#define	FRF_AB_MD_PRT_ADR_WIDTH 5
+#define	FRF_AB_MD_DEV_ADR_LBN 6
+#define	FRF_AB_MD_DEV_ADR_WIDTH 5
+
+/* MD_STAT_REG: PHY management status & mask register */
+#define	FR_AB_MD_STAT 0x00000c50
+#define	FRF_AB_MD_PINT_LBN 4
+#define	FRF_AB_MD_PINT_WIDTH 1
+#define	FRF_AB_MD_DONE_LBN 3
+#define	FRF_AB_MD_DONE_WIDTH 1
+#define	FRF_AB_MD_BSERR_LBN 2
+#define	FRF_AB_MD_BSERR_WIDTH 1
+#define	FRF_AB_MD_LNFL_LBN 1
+#define	FRF_AB_MD_LNFL_WIDTH 1
+#define	FRF_AB_MD_BSY_LBN 0
+#define	FRF_AB_MD_BSY_WIDTH 1
+
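+/*
+ * Illustrative sketch (example only): MDIO completion is typically
+ * detected by polling the MD_BSY field of MD_STAT until it clears,
+ * assuming the driver's efx_reado() and EFX_OWORD_FIELD() helpers.
+ */
+#if 0	/* example only, compiled out */
+static inline bool example_mdio_idle(struct efx_nic *efx)
+{
+	efx_oword_t md_stat;
+
+	efx_reado(efx, &md_stat, FR_AB_MD_STAT);
+	return EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0;
+}
+#endif
+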
+/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
+#define	FR_AB_MAC_STAT_DMA 0x00000c60
+#define	FRF_AB_MAC_STAT_DMA_CMD_LBN 48
+#define	FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
+#define	FRF_AB_MAC_STAT_DMA_ADR_LBN 0
+#define	FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48
+
+/* MAC_CTRL_REG: Port MAC control register */
+#define	FR_AB_MAC_CTRL 0x00000c80
+#define	FRF_AB_MAC_XOFF_VAL_LBN 16
+#define	FRF_AB_MAC_XOFF_VAL_WIDTH 16
+#define	FRF_BB_TXFIFO_DRAIN_EN_LBN 7
+#define	FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
+#define	FRF_AB_MAC_XG_DISTXCRC_LBN 5
+#define	FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
+#define	FRF_AB_MAC_BCAD_ACPT_LBN 4
+#define	FRF_AB_MAC_BCAD_ACPT_WIDTH 1
+#define	FRF_AB_MAC_UC_PROM_LBN 3
+#define	FRF_AB_MAC_UC_PROM_WIDTH 1
+#define	FRF_AB_MAC_LINK_STATUS_LBN 2
+#define	FRF_AB_MAC_LINK_STATUS_WIDTH 1
+#define	FRF_AB_MAC_SPEED_LBN 0
+#define	FRF_AB_MAC_SPEED_WIDTH 2
+#define	FFE_AB_MAC_SPEED_10G 3
+#define	FFE_AB_MAC_SPEED_1G 2
+#define	FFE_AB_MAC_SPEED_100M 1
+#define	FFE_AB_MAC_SPEED_10M 0
+
+/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
+#define	FR_BB_GEN_MODE 0x00000c90
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
+#define	FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
+#define	FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
+#define	FRF_BB_XFP_PHY_INT_MASK_LBN 1
+#define	FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
+#define	FRF_BB_XG_PHY_INT_MASK_LBN 0
+#define	FRF_BB_XG_PHY_INT_MASK_WIDTH 1
+
+/* MAC_MC_HASH_REG0: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG0 0x00000ca0
+#define	FRF_AB_MAC_MCAST_HASH0_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH0_WIDTH 128
+
+/* MAC_MC_HASH_REG1: Multicast address hash table */
+#define	FR_AB_MAC_MC_HASH_REG1 0x00000cb0
+#define	FRF_AB_MAC_MCAST_HASH1_LBN 0
+#define	FRF_AB_MAC_MCAST_HASH1_WIDTH 128
+
+/* GM_CFG1_REG: GMAC configuration register 1 */
+#define	FR_AB_GM_CFG1 0x00000e00
+#define	FRF_AB_GM_SW_RST_LBN 31
+#define	FRF_AB_GM_SW_RST_WIDTH 1
+#define	FRF_AB_GM_SIM_RST_LBN 30
+#define	FRF_AB_GM_SIM_RST_WIDTH 1
+#define	FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
+#define	FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
+#define	FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
+#define	FRF_AB_GM_RST_RX_FUNC_LBN 17
+#define	FRF_AB_GM_RST_RX_FUNC_WIDTH 1
+#define	FRF_AB_GM_RST_TX_FUNC_LBN 16
+#define	FRF_AB_GM_RST_TX_FUNC_WIDTH 1
+#define	FRF_AB_GM_LOOP_LBN 8
+#define	FRF_AB_GM_LOOP_WIDTH 1
+#define	FRF_AB_GM_RX_FC_EN_LBN 5
+#define	FRF_AB_GM_RX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_TX_FC_EN_LBN 4
+#define	FRF_AB_GM_TX_FC_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_RXEN_LBN 3
+#define	FRF_AB_GM_SYNC_RXEN_WIDTH 1
+#define	FRF_AB_GM_RX_EN_LBN 2
+#define	FRF_AB_GM_RX_EN_WIDTH 1
+#define	FRF_AB_GM_SYNC_TXEN_LBN 1
+#define	FRF_AB_GM_SYNC_TXEN_WIDTH 1
+#define	FRF_AB_GM_TX_EN_LBN 0
+#define	FRF_AB_GM_TX_EN_WIDTH 1
+
+/* GM_CFG2_REG: GMAC configuration register 2 */
+#define	FR_AB_GM_CFG2 0x00000e10
+#define	FRF_AB_GM_PAMBL_LEN_LBN 12
+#define	FRF_AB_GM_PAMBL_LEN_WIDTH 4
+#define	FRF_AB_GM_IF_MODE_LBN 8
+#define	FRF_AB_GM_IF_MODE_WIDTH 2
+#define	FFE_AB_IF_MODE_BYTE_MODE 2
+#define	FFE_AB_IF_MODE_NIBBLE_MODE 1
+#define	FRF_AB_GM_HUGE_FRM_EN_LBN 5
+#define	FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
+#define	FRF_AB_GM_LEN_CHK_LBN 4
+#define	FRF_AB_GM_LEN_CHK_WIDTH 1
+#define	FRF_AB_GM_PAD_CRC_EN_LBN 2
+#define	FRF_AB_GM_PAD_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_CRC_EN_LBN 1
+#define	FRF_AB_GM_CRC_EN_WIDTH 1
+#define	FRF_AB_GM_FD_LBN 0
+#define	FRF_AB_GM_FD_WIDTH 1
+
+/* GM_IPG_REG: GMAC IPG register */
+#define	FR_AB_GM_IPG 0x00000e20
+#define	FRF_AB_GM_NONB2B_IPG1_LBN 24
+#define	FRF_AB_GM_NONB2B_IPG1_WIDTH 7
+#define	FRF_AB_GM_NONB2B_IPG2_LBN 16
+#define	FRF_AB_GM_NONB2B_IPG2_WIDTH 7
+#define	FRF_AB_GM_MIN_IPG_ENF_LBN 8
+#define	FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
+#define	FRF_AB_GM_B2B_IPG_LBN 0
+#define	FRF_AB_GM_B2B_IPG_WIDTH 7
+
+/* GM_HD_REG: GMAC half duplex register */
+#define	FR_AB_GM_HD 0x00000e30
+#define	FRF_AB_GM_ALT_BOFF_VAL_LBN 20
+#define	FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
+#define	FRF_AB_GM_ALT_BOFF_EN_LBN 19
+#define	FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
+#define	FRF_AB_GM_BP_NO_BOFF_LBN 18
+#define	FRF_AB_GM_BP_NO_BOFF_WIDTH 1
+#define	FRF_AB_GM_DIS_BOFF_LBN 17
+#define	FRF_AB_GM_DIS_BOFF_WIDTH 1
+#define	FRF_AB_GM_EXDEF_TX_EN_LBN 16
+#define	FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
+#define	FRF_AB_GM_RTRY_LIMIT_LBN 12
+#define	FRF_AB_GM_RTRY_LIMIT_WIDTH 4
+#define	FRF_AB_GM_COL_WIN_LBN 0
+#define	FRF_AB_GM_COL_WIN_WIDTH 10
+
+/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
+#define	FR_AB_GM_MAX_FLEN 0x00000e40
+#define	FRF_AB_GM_MAX_FLEN_LBN 0
+#define	FRF_AB_GM_MAX_FLEN_WIDTH 16
+
+/* GM_TEST_REG: GMAC test register */
+#define	FR_AB_GM_TEST 0x00000e70
+#define	FRF_AB_GM_MAX_BOFF_LBN 3
+#define	FRF_AB_GM_MAX_BOFF_WIDTH 1
+#define	FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
+#define	FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
+#define	FRF_AB_GM_TEST_PAUSE_LBN 1
+#define	FRF_AB_GM_TEST_PAUSE_WIDTH 1
+#define	FRF_AB_GM_SHORT_SLOT_LBN 0
+#define	FRF_AB_GM_SHORT_SLOT_WIDTH 1
+
+/* GM_ADR1_REG: GMAC station address register 1 */
+#define	FR_AB_GM_ADR1 0x00000f00
+#define	FRF_AB_GM_ADR_B0_LBN 24
+#define	FRF_AB_GM_ADR_B0_WIDTH 8
+#define	FRF_AB_GM_ADR_B1_LBN 16
+#define	FRF_AB_GM_ADR_B1_WIDTH 8
+#define	FRF_AB_GM_ADR_B2_LBN 8
+#define	FRF_AB_GM_ADR_B2_WIDTH 8
+#define	FRF_AB_GM_ADR_B3_LBN 0
+#define	FRF_AB_GM_ADR_B3_WIDTH 8
+
+/* GM_ADR2_REG: GMAC station address register 2 */
+#define	FR_AB_GM_ADR2 0x00000f10
+#define	FRF_AB_GM_ADR_B4_LBN 24
+#define	FRF_AB_GM_ADR_B4_WIDTH 8
+#define	FRF_AB_GM_ADR_B5_LBN 16
+#define	FRF_AB_GM_ADR_B5_WIDTH 8
+
+/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
+#define	FR_AB_GMF_CFG0 0x00000f20
+#define	FRF_AB_GMF_FTFENRPLY_LBN 20
+#define	FRF_AB_GMF_FTFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_STFENRPLY_LBN 19
+#define	FRF_AB_GMF_STFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FRFENRPLY_LBN 18
+#define	FRF_AB_GMF_FRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_SRFENRPLY_LBN 17
+#define	FRF_AB_GMF_SRFENRPLY_WIDTH 1
+#define	FRF_AB_GMF_WTMENRPLY_LBN 16
+#define	FRF_AB_GMF_WTMENRPLY_WIDTH 1
+#define	FRF_AB_GMF_FTFENREQ_LBN 12
+#define	FRF_AB_GMF_FTFENREQ_WIDTH 1
+#define	FRF_AB_GMF_STFENREQ_LBN 11
+#define	FRF_AB_GMF_STFENREQ_WIDTH 1
+#define	FRF_AB_GMF_FRFENREQ_LBN 10
+#define	FRF_AB_GMF_FRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_SRFENREQ_LBN 9
+#define	FRF_AB_GMF_SRFENREQ_WIDTH 1
+#define	FRF_AB_GMF_WTMENREQ_LBN 8
+#define	FRF_AB_GMF_WTMENREQ_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFT_LBN 4
+#define	FRF_AB_GMF_HSTRSTFT_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTST_LBN 3
+#define	FRF_AB_GMF_HSTRSTST_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTFR_LBN 2
+#define	FRF_AB_GMF_HSTRSTFR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTSR_LBN 1
+#define	FRF_AB_GMF_HSTRSTSR_WIDTH 1
+#define	FRF_AB_GMF_HSTRSTWT_LBN 0
+#define	FRF_AB_GMF_HSTRSTWT_WIDTH 1
+
+/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
+#define	FR_AB_GMF_CFG1 0x00000f30
+#define	FRF_AB_GMF_CFGFRTH_LBN 16
+#define	FRF_AB_GMF_CFGFRTH_WIDTH 5
+#define	FRF_AB_GMF_CFGXOFFRTX_LBN 0
+#define	FRF_AB_GMF_CFGXOFFRTX_WIDTH 16
+
+/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
+#define	FR_AB_GMF_CFG2 0x00000f40
+#define	FRF_AB_GMF_CFGHWM_LBN 16
+#define	FRF_AB_GMF_CFGHWM_WIDTH 6
+#define	FRF_AB_GMF_CFGLWM_LBN 0
+#define	FRF_AB_GMF_CFGLWM_WIDTH 6
+
+/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
+#define	FR_AB_GMF_CFG3 0x00000f50
+#define	FRF_AB_GMF_CFGHWMFT_LBN 16
+#define	FRF_AB_GMF_CFGHWMFT_WIDTH 6
+#define	FRF_AB_GMF_CFGFTTH_LBN 0
+#define	FRF_AB_GMF_CFGFTTH_WIDTH 6
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define	FR_AB_GMF_CFG4 0x00000f60
+#define	FRF_AB_GMF_HSTFLTRFRM_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRM_WIDTH 18
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define	FR_AB_GMF_CFG5 0x00000f70
+#define	FRF_AB_GMF_CFGHDPLX_LBN 22
+#define	FRF_AB_GMF_CFGHDPLX_WIDTH 1
+#define	FRF_AB_GMF_SRFULL_LBN 21
+#define	FRF_AB_GMF_SRFULL_WIDTH 1
+#define	FRF_AB_GMF_HSTSRFULLCLR_LBN 20
+#define	FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
+#define	FRF_AB_GMF_CFGBYTMODE_LBN 19
+#define	FRF_AB_GMF_CFGBYTMODE_WIDTH 1
+#define	FRF_AB_GMF_HSTDRPLT64_LBN 18
+#define	FRF_AB_GMF_HSTDRPLT64_WIDTH 1
+#define	FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
+#define	FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18
+
+/* TX_SRC_MAC_TBL: Transmit MAC source address filter table */
+#define	FR_BB_TX_SRC_MAC_TBL 0x00001000
+#define	FR_BB_TX_SRC_MAC_TBL_STEP 16
+#define	FR_BB_TX_SRC_MAC_TBL_ROWS 16
+#define	FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
+#define	FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
+#define	FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
+#define	FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48
+
+/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
+#define	FR_BB_TX_SRC_MAC_CTL 0x00001100
+#define	FRF_BB_TX_SRC_DROP_CTR_LBN 16
+#define	FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
+#define	FRF_BB_TX_SRC_FLTR_EN_LBN 15
+#define	FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
+#define	FRF_BB_TX_DROP_CTR_CLR_LBN 12
+#define	FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
+#define	FRF_BB_TX_MAC_QID_SEL_LBN 0
+#define	FRF_BB_TX_MAC_QID_SEL_WIDTH 3
+
+/* XM_ADR_LO_REG: XGMAC address register low */
+#define	FR_AB_XM_ADR_LO 0x00001200
+#define	FRF_AB_XM_ADR_LO_LBN 0
+#define	FRF_AB_XM_ADR_LO_WIDTH 32
+
+/* XM_ADR_HI_REG: XGMAC address register high */
+#define	FR_AB_XM_ADR_HI 0x00001210
+#define	FRF_AB_XM_ADR_HI_LBN 0
+#define	FRF_AB_XM_ADR_HI_WIDTH 16
+
+/* XM_GLB_CFG_REG: XGMAC global configuration */
+#define	FR_AB_XM_GLB_CFG 0x00001220
+#define	FRF_AB_XM_RMTFLT_GEN_LBN 17
+#define	FRF_AB_XM_RMTFLT_GEN_WIDTH 1
+#define	FRF_AB_XM_DEBUG_MODE_LBN 16
+#define	FRF_AB_XM_DEBUG_MODE_WIDTH 1
+#define	FRF_AB_XM_RX_STAT_EN_LBN 11
+#define	FRF_AB_XM_RX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_TX_STAT_EN_LBN 10
+#define	FRF_AB_XM_TX_STAT_EN_WIDTH 1
+#define	FRF_AB_XM_RX_JUMBO_MODE_LBN 6
+#define	FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_WAN_MODE_LBN 5
+#define	FRF_AB_XM_WAN_MODE_WIDTH 1
+#define	FRF_AB_XM_INTCLR_MODE_LBN 3
+#define	FRF_AB_XM_INTCLR_MODE_WIDTH 1
+#define	FRF_AB_XM_CORE_RST_LBN 0
+#define	FRF_AB_XM_CORE_RST_WIDTH 1
+
+/* XM_TX_CFG_REG: XGMAC transmit configuration */
+#define	FR_AB_XM_TX_CFG 0x00001230
+#define	FRF_AB_XM_TX_PROG_LBN 24
+#define	FRF_AB_XM_TX_PROG_WIDTH 1
+#define	FRF_AB_XM_IPG_LBN 16
+#define	FRF_AB_XM_IPG_WIDTH 4
+#define	FRF_AB_XM_FCNTL_LBN 10
+#define	FRF_AB_XM_FCNTL_WIDTH 1
+#define	FRF_AB_XM_TXCRC_LBN 8
+#define	FRF_AB_XM_TXCRC_WIDTH 1
+#define	FRF_AB_XM_EDRC_LBN 6
+#define	FRF_AB_XM_EDRC_WIDTH 1
+#define	FRF_AB_XM_AUTO_PAD_LBN 5
+#define	FRF_AB_XM_AUTO_PAD_WIDTH 1
+#define	FRF_AB_XM_TX_PRMBL_LBN 2
+#define	FRF_AB_XM_TX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_TXEN_LBN 1
+#define	FRF_AB_XM_TXEN_WIDTH 1
+#define	FRF_AB_XM_TX_RST_LBN 0
+#define	FRF_AB_XM_TX_RST_WIDTH 1
+
+/* XM_RX_CFG_REG: XGMAC receive configuration */
+#define	FR_AB_XM_RX_CFG 0x00001240
+#define	FRF_AB_XM_PASS_LENERR_LBN 26
+#define	FRF_AB_XM_PASS_LENERR_WIDTH 1
+#define	FRF_AB_XM_PASS_CRC_ERR_LBN 25
+#define	FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
+#define	FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_REJ_BCAST_LBN 20
+#define	FRF_AB_XM_REJ_BCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
+#define	FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
+#define	FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
+#define	FRF_AB_XM_AUTO_DEPAD_LBN 8
+#define	FRF_AB_XM_AUTO_DEPAD_WIDTH 1
+#define	FRF_AB_XM_RXCRC_LBN 3
+#define	FRF_AB_XM_RXCRC_WIDTH 1
+#define	FRF_AB_XM_RX_PRMBL_LBN 2
+#define	FRF_AB_XM_RX_PRMBL_WIDTH 1
+#define	FRF_AB_XM_RXEN_LBN 1
+#define	FRF_AB_XM_RXEN_WIDTH 1
+#define	FRF_AB_XM_RX_RST_LBN 0
+#define	FRF_AB_XM_RX_RST_WIDTH 1
+
+/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
+#define	FR_AB_XM_MGT_INT_MASK 0x00001250
+#define	FRF_AB_XM_MSK_STA_INTR_LBN 16
+#define	FRF_AB_XM_MSK_STA_INTR_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
+#define	FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
+#define	FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_MSK_RMTFLT_LBN 1
+#define	FRF_AB_XM_MSK_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_MSK_LCLFLT_LBN 0
+#define	FRF_AB_XM_MSK_LCLFLT_WIDTH 1
+
+/* XM_FC_REG: XGMAC flow control register */
+#define	FR_AB_XM_FC 0x00001270
+#define	FRF_AB_XM_PAUSE_TIME_LBN 16
+#define	FRF_AB_XM_PAUSE_TIME_WIDTH 16
+#define	FRF_AB_XM_RX_MAC_STAT_LBN 11
+#define	FRF_AB_XM_RX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_TX_MAC_STAT_LBN 10
+#define	FRF_AB_XM_TX_MAC_STAT_WIDTH 1
+#define	FRF_AB_XM_MCNTL_PASS_LBN 8
+#define	FRF_AB_XM_MCNTL_PASS_WIDTH 2
+#define	FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
+#define	FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
+#define	FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
+#define	FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
+#define	FRF_AB_XM_ZPAUSE_LBN 2
+#define	FRF_AB_XM_ZPAUSE_WIDTH 1
+#define	FRF_AB_XM_XMIT_PAUSE_LBN 1
+#define	FRF_AB_XM_XMIT_PAUSE_WIDTH 1
+#define	FRF_AB_XM_DIS_FCNTL_LBN 0
+#define	FRF_AB_XM_DIS_FCNTL_WIDTH 1
+
+/* XM_PAUSE_TIME_REG: XGMAC pause time register */
+#define	FR_AB_XM_PAUSE_TIME 0x00001290
+#define	FRF_AB_XM_TX_PAUSE_CNT_LBN 16
+#define	FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
+#define	FRF_AB_XM_RX_PAUSE_CNT_LBN 0
+#define	FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FR_AB_XM_TX_PARAM 0x000012d0
+#define	FRF_AB_XM_TX_JUMBO_MODE_LBN 31
+#define	FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
+#define	FRF_AB_XM_PAD_CHAR_LBN 0
+#define	FRF_AB_XM_PAD_CHAR_WIDTH 8
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FR_AB_XM_RX_PARAM 0x000012e0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3
+
+/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
+#define	FR_AB_XM_MGT_INT_MSK 0x000012f0
+#define	FRF_AB_XM_STAT_CNTR_OF_LBN 9
+#define	FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
+#define	FRF_AB_XM_STAT_CNTR_HF_LBN 8
+#define	FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
+#define	FRF_AB_XM_PRMBLE_ERR_LBN 2
+#define	FRF_AB_XM_PRMBLE_ERR_WIDTH 1
+#define	FRF_AB_XM_RMTFLT_LBN 1
+#define	FRF_AB_XM_RMTFLT_WIDTH 1
+#define	FRF_AB_XM_LCLFLT_LBN 0
+#define	FRF_AB_XM_LCLFLT_WIDTH 1
+
+/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
+#define	FR_AB_XX_PWR_RST 0x00001300
+#define	FRF_AB_XX_PWRDND_SIG_LBN 31
+#define	FRF_AB_XX_PWRDND_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_SIG_LBN 30
+#define	FRF_AB_XX_PWRDNC_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_SIG_LBN 29
+#define	FRF_AB_XX_PWRDNB_SIG_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_SIG_LBN 28
+#define	FRF_AB_XX_PWRDNA_SIG_WIDTH 1
+#define	FRF_AB_XX_SIM_MODE_LBN 27
+#define	FRF_AB_XX_SIM_MODE_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_SIG_LBN 25
+#define	FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_SIG_LBN 24
+#define	FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETD_SIG_LBN 23
+#define	FRF_AB_XX_RESETD_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETC_SIG_LBN 22
+#define	FRF_AB_XX_RESETC_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETB_SIG_LBN 21
+#define	FRF_AB_XX_RESETB_SIG_WIDTH 1
+#define	FRF_AB_XX_RESETA_SIG_LBN 20
+#define	FRF_AB_XX_RESETA_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
+#define	FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
+#define	FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
+#define	FRF_AB_XX_SD_RST_ACT_LBN 16
+#define	FRF_AB_XX_SD_RST_ACT_WIDTH 1
+#define	FRF_AB_XX_PWRDND_EN_LBN 15
+#define	FRF_AB_XX_PWRDND_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNC_EN_LBN 14
+#define	FRF_AB_XX_PWRDNC_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNB_EN_LBN 13
+#define	FRF_AB_XX_PWRDNB_EN_WIDTH 1
+#define	FRF_AB_XX_PWRDNA_EN_LBN 12
+#define	FRF_AB_XX_PWRDNA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLCD_EN_LBN 9
+#define	FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
+#define	FRF_AB_XX_RSTPLLAB_EN_LBN 8
+#define	FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETD_EN_LBN 7
+#define	FRF_AB_XX_RESETD_EN_WIDTH 1
+#define	FRF_AB_XX_RESETC_EN_LBN 6
+#define	FRF_AB_XX_RESETC_EN_WIDTH 1
+#define	FRF_AB_XX_RESETB_EN_LBN 5
+#define	FRF_AB_XX_RESETB_EN_WIDTH 1
+#define	FRF_AB_XX_RESETA_EN_LBN 4
+#define	FRF_AB_XX_RESETA_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSRX_EN_LBN 2
+#define	FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_LBN 1
+#define	FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
+#define	FRF_AB_XX_RST_XX_EN_LBN 0
+#define	FRF_AB_XX_RST_XX_EN_WIDTH 1
+
+/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
+#define	FR_AB_XX_SD_CTL 0x00001310
+#define	FRF_AB_XX_TERMADJ1_LBN 17
+#define	FRF_AB_XX_TERMADJ1_WIDTH 1
+#define	FRF_AB_XX_TERMADJ0_LBN 16
+#define	FRF_AB_XX_TERMADJ0_WIDTH 1
+#define	FRF_AB_XX_HIDRVD_LBN 15
+#define	FRF_AB_XX_HIDRVD_WIDTH 1
+#define	FRF_AB_XX_LODRVD_LBN 14
+#define	FRF_AB_XX_LODRVD_WIDTH 1
+#define	FRF_AB_XX_HIDRVC_LBN 13
+#define	FRF_AB_XX_HIDRVC_WIDTH 1
+#define	FRF_AB_XX_LODRVC_LBN 12
+#define	FRF_AB_XX_LODRVC_WIDTH 1
+#define	FRF_AB_XX_HIDRVB_LBN 11
+#define	FRF_AB_XX_HIDRVB_WIDTH 1
+#define	FRF_AB_XX_LODRVB_LBN 10
+#define	FRF_AB_XX_LODRVB_WIDTH 1
+#define	FRF_AB_XX_HIDRVA_LBN 9
+#define	FRF_AB_XX_HIDRVA_WIDTH 1
+#define	FRF_AB_XX_LODRVA_LBN 8
+#define	FRF_AB_XX_LODRVA_WIDTH 1
+#define	FRF_AB_XX_LPBKD_LBN 3
+#define	FRF_AB_XX_LPBKD_WIDTH 1
+#define	FRF_AB_XX_LPBKC_LBN 2
+#define	FRF_AB_XX_LPBKC_WIDTH 1
+#define	FRF_AB_XX_LPBKB_LBN 1
+#define	FRF_AB_XX_LPBKB_WIDTH 1
+#define	FRF_AB_XX_LPBKA_LBN 0
+#define	FRF_AB_XX_LPBKA_WIDTH 1
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+#define	FR_AB_XX_TXDRV_CTL 0x00001320
+#define	FRF_AB_XX_DEQD_LBN 28
+#define	FRF_AB_XX_DEQD_WIDTH 4
+#define	FRF_AB_XX_DEQC_LBN 24
+#define	FRF_AB_XX_DEQC_WIDTH 4
+#define	FRF_AB_XX_DEQB_LBN 20
+#define	FRF_AB_XX_DEQB_WIDTH 4
+#define	FRF_AB_XX_DEQA_LBN 16
+#define	FRF_AB_XX_DEQA_WIDTH 4
+#define	FRF_AB_XX_DTXD_LBN 12
+#define	FRF_AB_XX_DTXD_WIDTH 4
+#define	FRF_AB_XX_DTXC_LBN 8
+#define	FRF_AB_XX_DTXC_WIDTH 4
+#define	FRF_AB_XX_DTXB_LBN 4
+#define	FRF_AB_XX_DTXB_WIDTH 4
+#define	FRF_AB_XX_DTXA_LBN 0
+#define	FRF_AB_XX_DTXA_WIDTH 4
+
+/* XX_PRBS_CTL_REG: documentation to be written for sum_XX_PRBS_CTL_REG */
+#define	FR_AB_XX_PRBS_CTL 0x00001330
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
+#define	FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
+#define	FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
+#define	FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
+#define	FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
+#define	FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
+#define	FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
+#define	FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
+#define	FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
+#define	FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
+#define	FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
+#define	FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
+#define	FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
+#define	FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
+#define	FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
+#define	FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
+#define	FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
+#define	FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
+#define	FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
+#define	FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
+#define	FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
+#define	FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
+#define	FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
+#define	FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
+#define	FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
+
+/* XX_PRBS_CHK_REG: documentation to be written for sum_XX_PRBS_CHK_REG */
+#define	FR_AB_XX_PRBS_CHK 0x00001340
+#define	FRF_AB_XX_REV_LB_EN_LBN 16
+#define	FRF_AB_XX_REV_LB_EN_WIDTH 1
+#define	FRF_AB_XX_CH3_DEG_DET_LBN 15
+#define	FRF_AB_XX_CH3_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
+#define	FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
+#define	FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH3_ERR_CHK_LBN 12
+#define	FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH2_DEG_DET_LBN 11
+#define	FRF_AB_XX_CH2_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
+#define	FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
+#define	FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH2_ERR_CHK_LBN 8
+#define	FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH1_DEG_DET_LBN 7
+#define	FRF_AB_XX_CH1_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
+#define	FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
+#define	FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH1_ERR_CHK_LBN 4
+#define	FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
+#define	FRF_AB_XX_CH0_DEG_DET_LBN 3
+#define	FRF_AB_XX_CH0_DEG_DET_WIDTH 1
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
+#define	FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
+#define	FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
+#define	FRF_AB_XX_CH0_ERR_CHK_LBN 0
+#define	FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
+
+/* XX_PRBS_ERR_REG: documentation to be written for sum_XX_PRBS_ERR_REG */
+#define	FR_AB_XX_PRBS_ERR 0x00001350
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
+#define	FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
+#define	FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
+#define	FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
+#define	FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+#define	FR_AB_XX_CORE_STAT 0x00001360
+#define	FRF_AB_XX_FORCE_SIG3_LBN 31
+#define	FRF_AB_XX_FORCE_SIG3_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
+#define	FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_LBN 29
+#define	FRF_AB_XX_FORCE_SIG2_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
+#define	FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_LBN 27
+#define	FRF_AB_XX_FORCE_SIG1_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
+#define	FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_LBN 25
+#define	FRF_AB_XX_FORCE_SIG0_WIDTH 1
+#define	FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
+#define	FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
+#define	FRF_AB_XX_XGXS_LB_EN_LBN 23
+#define	FRF_AB_XX_XGXS_LB_EN_WIDTH 1
+#define	FRF_AB_XX_XGMII_LB_EN_LBN 22
+#define	FRF_AB_XX_XGMII_LB_EN_WIDTH 1
+#define	FRF_AB_XX_MATCH_FAULT_LBN 21
+#define	FRF_AB_XX_MATCH_FAULT_WIDTH 1
+#define	FRF_AB_XX_ALIGN_DONE_LBN 20
+#define	FRF_AB_XX_ALIGN_DONE_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT3_LBN 19
+#define	FRF_AB_XX_SYNC_STAT3_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT2_LBN 18
+#define	FRF_AB_XX_SYNC_STAT2_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT1_LBN 17
+#define	FRF_AB_XX_SYNC_STAT1_WIDTH 1
+#define	FRF_AB_XX_SYNC_STAT0_LBN 16
+#define	FRF_AB_XX_SYNC_STAT0_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH3_LBN 15
+#define	FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH2_LBN 14
+#define	FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH1_LBN 13
+#define	FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
+#define	FRF_AB_XX_COMMA_DET_CH0_LBN 12
+#define	FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
+#define	FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
+#define	FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
+#define	FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
+#define	FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH3_LBN 7
+#define	FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH2_LBN 6
+#define	FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH1_LBN 5
+#define	FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
+#define	FRF_AB_XX_CHAR_ERR_CH0_LBN 4
+#define	FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH3_LBN 3
+#define	FRF_AB_XX_DISPERR_CH3_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH2_LBN 2
+#define	FRF_AB_XX_DISPERR_CH2_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH1_LBN 1
+#define	FRF_AB_XX_DISPERR_CH1_WIDTH 1
+#define	FRF_AB_XX_DISPERR_CH0_LBN 0
+#define	FRF_AB_XX_DISPERR_CH0_WIDTH 1
+
+/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
+#define	FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
+#define	FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
+/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
+#define	FR_BZ_RX_DESC_PTR_TBL 0x00f40000
+#define	FR_BZ_RX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_RX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_RX_HDR_SPLIT_LBN 90
+#define	FRF_CZ_RX_HDR_SPLIT_WIDTH 1
+#define	FRF_AA_RX_RESET_LBN 89
+#define	FRF_AA_RX_RESET_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
+#define	FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
+#define	FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_RX_DESC_PREF_ACT_LBN 86
+#define	FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
+#define	FRF_AZ_RX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_RX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_RX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_RX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_RX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_RX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_RX_DESCQ_SIZE_512 0
+#define	FRF_AZ_RX_DESCQ_TYPE_LBN 2
+#define	FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_LBN 1
+#define	FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
+#define	FRF_AZ_RX_DESCQ_EN_LBN 0
+#define	FRF_AZ_RX_DESCQ_EN_WIDTH 1
+
+/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
+#define	FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
+#define	FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
+#define	FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
+/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
+#define	FR_BZ_TX_DESC_PTR_TBL 0x00f50000
+#define	FR_BZ_TX_DESC_PTR_TBL_STEP 16
+#define	FR_BB_TX_DESC_PTR_TBL_ROWS 4096
+#define	FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
+#define	FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
+#define	FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
+#define	FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
+#define	FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
+#define	FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
+#define	FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
+#define	FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
+#define	FRF_AZ_TX_DESCQ_EN_LBN 88
+#define	FRF_AZ_TX_DESCQ_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
+#define	FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
+#define	FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
+#define	FRF_AZ_TX_DC_HW_RPTR_LBN 80
+#define	FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
+#define	FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
+#define	FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
+#define	FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
+#define	FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
+#define	FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
+#define	FRF_AZ_TX_DESCQ_LABEL_LBN 5
+#define	FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
+#define	FRF_AZ_TX_DESCQ_SIZE_LBN 3
+#define	FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
+#define	FFE_AZ_TX_DESCQ_SIZE_4K 3
+#define	FFE_AZ_TX_DESCQ_SIZE_2K 2
+#define	FFE_AZ_TX_DESCQ_SIZE_1K 1
+#define	FFE_AZ_TX_DESCQ_SIZE_512 0
+#define	FRF_AZ_TX_DESCQ_TYPE_LBN 1
+#define	FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
+#define	FRF_AZ_TX_DESCQ_FLUSH_LBN 0
+#define	FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
+
+/* EVQ_PTR_TBL_KER: Event queue pointer table */
+#define	FR_AA_EVQ_PTR_TBL_KER 0x00011a00
+#define	FR_AA_EVQ_PTR_TBL_KER_STEP 16
+#define	FR_AA_EVQ_PTR_TBL_KER_ROWS 4
+/* EVQ_PTR_TBL: Event queue pointer table */
+#define	FR_BZ_EVQ_PTR_TBL 0x00f60000
+#define	FR_BZ_EVQ_PTR_TBL_STEP 16
+#define	FR_CZ_EVQ_PTR_TBL_ROWS 1024
+#define	FR_BB_EVQ_PTR_TBL_ROWS 4096
+#define	FRF_BZ_EVQ_RPTR_IGN_LBN 40
+#define	FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
+#define	FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
+#define	FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
+#define	FRF_AZ_EVQ_NXT_WPTR_LBN 24
+#define	FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
+#define	FRF_AZ_EVQ_EN_LBN 23
+#define	FRF_AZ_EVQ_EN_WIDTH 1
+#define	FRF_AZ_EVQ_SIZE_LBN 20
+#define	FRF_AZ_EVQ_SIZE_WIDTH 3
+#define	FFE_AZ_EVQ_SIZE_32K 6
+#define	FFE_AZ_EVQ_SIZE_16K 5
+#define	FFE_AZ_EVQ_SIZE_8K 4
+#define	FFE_AZ_EVQ_SIZE_4K 3
+#define	FFE_AZ_EVQ_SIZE_2K 2
+#define	FFE_AZ_EVQ_SIZE_1K 1
+#define	FFE_AZ_EVQ_SIZE_512 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
+#define	FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
+
+/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */
+#define	FR_AA_BUF_HALF_TBL_KER 0x00018000
+#define	FR_AA_BUF_HALF_TBL_KER_STEP 8
+#define	FR_AA_BUF_HALF_TBL_KER_ROWS 4096
+/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */
+#define	FR_BZ_BUF_HALF_TBL 0x00800000
+#define	FR_BZ_BUF_HALF_TBL_STEP 8
+#define	FR_CZ_BUF_HALF_TBL_ROWS 147456
+#define	FR_BB_BUF_HALF_TBL_ROWS 524288
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
+#define	FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
+#define	FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
+
+/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */
+#define	FR_AA_BUF_FULL_TBL_KER 0x00018000
+#define	FR_AA_BUF_FULL_TBL_KER_STEP 8
+#define	FR_AA_BUF_FULL_TBL_KER_ROWS 4096
+/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */
+#define	FR_BZ_BUF_FULL_TBL 0x00800000
+#define	FR_BZ_BUF_FULL_TBL_STEP 8
+#define	FR_CZ_BUF_FULL_TBL_ROWS 147456
+#define	FR_BB_BUF_FULL_TBL_ROWS 917504
+#define	FRF_AZ_BUF_FULL_UNUSED_LBN 51
+#define	FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
+#define	FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
+#define	FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
+#define	FRF_AZ_BUF_ADR_REGION_LBN 48
+#define	FRF_AZ_BUF_ADR_REGION_WIDTH 2
+#define	FFE_AZ_BUF_ADR_REGN3 3
+#define	FFE_AZ_BUF_ADR_REGN2 2
+#define	FFE_AZ_BUF_ADR_REGN1 1
+#define	FFE_AZ_BUF_ADR_REGN0 0
+#define	FRF_AZ_BUF_ADR_FBUF_LBN 14
+#define	FRF_AZ_BUF_ADR_FBUF_WIDTH 34
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
+#define	FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
+#define	FR_BZ_RX_FILTER_TBL0 0x00f00000
+#define	FR_BZ_RX_FILTER_TBL0_STEP 32
+#define	FR_BZ_RX_FILTER_TBL0_ROWS 8192
+/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
+#define	FR_BB_RX_FILTER_TBL1 0x00f00010
+#define	FR_BB_RX_FILTER_TBL1_STEP 32
+#define	FR_BB_RX_FILTER_TBL1_ROWS 8192
+#define	FRF_BZ_RSS_EN_LBN 110
+#define	FRF_BZ_RSS_EN_WIDTH 1
+#define	FRF_BZ_SCATTER_EN_LBN 109
+#define	FRF_BZ_SCATTER_EN_WIDTH 1
+#define	FRF_BZ_TCP_UDP_LBN 108
+#define	FRF_BZ_TCP_UDP_WIDTH 1
+#define	FRF_BZ_RXQ_ID_LBN 96
+#define	FRF_BZ_RXQ_ID_WIDTH 12
+#define	FRF_BZ_DEST_IP_LBN 64
+#define	FRF_BZ_DEST_IP_WIDTH 32
+#define	FRF_BZ_DEST_PORT_TCP_LBN 48
+#define	FRF_BZ_DEST_PORT_TCP_WIDTH 16
+#define	FRF_BZ_SRC_IP_LBN 16
+#define	FRF_BZ_SRC_IP_WIDTH 32
+#define	FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
+#define	FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
+#define	FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
+#define	FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_RMFT_RSS_EN_LBN 75
+#define	FRF_CZ_RMFT_RSS_EN_WIDTH 1
+#define	FRF_CZ_RMFT_SCATTER_EN_LBN 74
+#define	FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
+#define	FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
+#define	FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
+#define	FRF_CZ_RMFT_RXQ_ID_LBN 61
+#define	FRF_CZ_RMFT_RXQ_ID_WIDTH 12
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_RMFT_DEST_MAC_LBN 12
+#define	FRF_CZ_RMFT_DEST_MAC_WIDTH 48
+#define	FRF_CZ_RMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_RMFT_VLAN_ID_WIDTH 12
+
+/* TIMER_TBL: Timer table */
+#define	FR_BZ_TIMER_TBL 0x00f70000
+#define	FR_BZ_TIMER_TBL_STEP 16
+#define	FR_CZ_TIMER_TBL_ROWS 1024
+#define	FR_BB_TIMER_TBL_ROWS 4096
+#define	FRF_CZ_TIMER_Q_EN_LBN 33
+#define	FRF_CZ_TIMER_Q_EN_WIDTH 1
+#define	FRF_CZ_INT_ARMD_LBN 32
+#define	FRF_CZ_INT_ARMD_WIDTH 1
+#define	FRF_CZ_INT_PEND_LBN 31
+#define	FRF_CZ_INT_PEND_WIDTH 1
+#define	FRF_CZ_HOST_NOTIFY_MODE_LBN 30
+#define	FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
+#define	FRF_CZ_RELOAD_TIMER_VAL_LBN 16
+#define	FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
+#define	FRF_CZ_TIMER_MODE_LBN 14
+#define	FRF_CZ_TIMER_MODE_WIDTH 2
+#define	FFE_CZ_TIMER_MODE_INT_HLDOFF 3
+#define	FFE_CZ_TIMER_MODE_TRIG_START 2
+#define	FFE_CZ_TIMER_MODE_IMMED_START 1
+#define	FFE_CZ_TIMER_MODE_DIS 0
+#define	FRF_BB_TIMER_MODE_LBN 12
+#define	FRF_BB_TIMER_MODE_WIDTH 2
+#define	FFE_BB_TIMER_MODE_INT_HLDOFF 2
+#define	FFE_BB_TIMER_MODE_TRIG_START 2
+#define	FFE_BB_TIMER_MODE_IMMED_START 1
+#define	FFE_BB_TIMER_MODE_DIS 0
+#define	FRF_CZ_TIMER_VAL_LBN 0
+#define	FRF_CZ_TIMER_VAL_WIDTH 14
+#define	FRF_BB_TIMER_VAL_LBN 0
+#define	FRF_BB_TIMER_VAL_WIDTH 12
+
+/* TX_PACE_TBL: Transmit pacing table */
+#define	FR_BZ_TX_PACE_TBL 0x00f80000
+#define	FR_BZ_TX_PACE_TBL_STEP 16
+#define	FR_CZ_TX_PACE_TBL_ROWS 1024
+#define	FR_BB_TX_PACE_TBL_ROWS 4096
+#define	FRF_BZ_TX_PACE_LBN 0
+#define	FRF_BZ_TX_PACE_WIDTH 5
+
+/* RX_INDIRECTION_TBL: RX Indirection Table */
+#define	FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
+#define	FR_BZ_RX_INDIRECTION_TBL_STEP 16
+#define	FR_BZ_RX_INDIRECTION_TBL_ROWS 128
+#define	FRF_BZ_IT_QUEUE_LBN 0
+#define	FRF_BZ_IT_QUEUE_WIDTH 6
+
+/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
+#define	FR_CZ_TX_FILTER_TBL0 0x00fc0000
+#define	FR_CZ_TX_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_FILTER_TBL0_ROWS 8192
+#define	FRF_CZ_TIFT_TCP_UDP_LBN 108
+#define	FRF_CZ_TIFT_TCP_UDP_WIDTH 1
+#define	FRF_CZ_TIFT_TXQ_ID_LBN 96
+#define	FRF_CZ_TIFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TIFT_DEST_IP_LBN 64
+#define	FRF_CZ_TIFT_DEST_IP_WIDTH 32
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
+#define	FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
+#define	FRF_CZ_TIFT_SRC_IP_LBN 16
+#define	FRF_CZ_TIFT_SRC_IP_WIDTH 32
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
+#define	FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
+
+/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
+#define	FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
+#define	FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
+#define	FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
+#define	FRF_CZ_TMFT_TXQ_ID_LBN 61
+#define	FRF_CZ_TMFT_TXQ_ID_WIDTH 12
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
+#define	FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
+#define	FRF_CZ_TMFT_SRC_MAC_LBN 12
+#define	FRF_CZ_TMFT_SRC_MAC_WIDTH 48
+#define	FRF_CZ_TMFT_VLAN_ID_LBN 0
+#define	FRF_CZ_TMFT_VLAN_ID_WIDTH 12
+
+/* MC_TREG_SMEM: MC Shared Memory */
+#define	FR_CZ_MC_TREG_SMEM 0x00ff0000
+#define	FR_CZ_MC_TREG_SMEM_STEP 4
+#define	FR_CZ_MC_TREG_SMEM_ROWS 512
+#define	FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
+#define	FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
+
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
+#define	FR_BZ_MSIX_VECTOR_TABLE_STEP 16
+#define	FR_BB_MSIX_VECTOR_TABLE_ROWS 64
+/* MSIX_VECTOR_TABLE: MSIX Vector Table */
+#define	FR_CZ_MSIX_VECTOR_TABLE 0x00000000
+/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
+#define	FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
+#define	FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
+#define	FRF_BZ_MSIX_VECTOR_MASK_LBN 96
+#define	FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
+#define	FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
+#define	FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
+#define	FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
+
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_BB_MSIX_PBA_TABLE 0x00ff2000
+#define	FR_BZ_MSIX_PBA_TABLE_STEP 4
+#define	FR_BB_MSIX_PBA_TABLE_ROWS 2
+/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_MSIX_PBA_TABLE 0x00008000
+/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
+#define	FR_CZ_MSIX_PBA_TABLE_ROWS 32
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* SRM_DBG_REG: SRAM debug access */
+#define	FR_BZ_SRM_DBG 0x03000000
+#define	FR_BZ_SRM_DBG_STEP 8
+#define	FR_CZ_SRM_DBG_ROWS 262144
+#define	FR_BB_SRM_DBG_ROWS 2097152
+#define	FRF_BZ_SRM_DBG_LBN 0
+#define	FRF_BZ_SRM_DBG_WIDTH 64
+
+/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
+#define	FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
+#define	FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
+#define	FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
+#define	FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define	FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
+#define	FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
+#define	FSE_BZ_TX_DSC_ERROR_EV 15
+#define	FSE_BZ_RX_DSC_ERROR_EV 14
+#define	FSE_AA_RX_RECOVER_EV 11
+#define	FSE_AZ_TIMER_EV 10
+#define	FSE_AZ_TX_PKT_NON_TCP_UDP 9
+#define	FSE_AZ_WAKE_UP_EV 6
+#define	FSE_AZ_SRM_UPD_DONE_EV 5
+#define	FSE_AB_EVQ_NOT_EN_EV 3
+#define	FSE_AZ_EVQ_INIT_DONE_EV 2
+#define	FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
+#define	FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
+#define	FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
+
+/* EVENT_ENTRY */
+#define	FSF_AZ_EV_CODE_LBN 60
+#define	FSF_AZ_EV_CODE_WIDTH 4
+#define	FSE_CZ_EV_CODE_MCDI_EV 12
+#define	FSE_CZ_EV_CODE_USER_EV 8
+#define	FSE_AZ_EV_CODE_DRV_GEN_EV 7
+#define	FSE_AZ_EV_CODE_GLOBAL_EV 6
+#define	FSE_AZ_EV_CODE_DRIVER_EV 5
+#define	FSE_AZ_EV_CODE_TX_EV 2
+#define	FSE_AZ_EV_CODE_RX_EV 0
+#define	FSF_AZ_EV_DATA_LBN 0
+#define	FSF_AZ_EV_DATA_WIDTH 60
+
+/* GLOBAL_EV */
+#define	FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
+#define	FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
+#define	FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
+#define	FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
+#define	FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
+#define	FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
+#define	FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
+
+/* LEGACY_INT_VEC */
+#define	FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
+#define	FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
+#define	FSF_AZ_NET_IVEC_INT_Q_LBN 40
+#define	FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
+#define	FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
+#define	FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
+#define	FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
+
+/* MC_XGMAC_FLTR_RULE_DEF */
+#define	FSF_CZ_MC_XFRC_MODE_LBN 416
+#define	FSF_CZ_MC_XFRC_MODE_WIDTH 1
+#define	FSE_CZ_MC_XFRC_MODE_LAYERED 1
+#define	FSE_CZ_MC_XFRC_MODE_SIMPLE 0
+#define	FSF_CZ_MC_XFRC_HASH_LBN 384
+#define	FSF_CZ_MC_XFRC_HASH_WIDTH 32
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
+#define	FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
+#define	FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
+#define	FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
+
+/* RX_EV */
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
+#define	FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
+#define	FSF_CZ_RX_EV_IPV6_PKT_LBN 57
+#define	FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_OK_LBN 56
+#define	FSF_AZ_RX_EV_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
+#define	FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define	FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
+#define	FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define	FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define	FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
+#define	FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
+#define	FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
+#define	FSF_AA_RX_EV_DRIB_NIB_LBN 49
+#define	FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
+#define	FSF_AZ_RX_EV_TOBE_DISC_LBN 47
+#define	FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
+#define	FSF_AZ_RX_EV_PKT_TYPE_LBN 44
+#define	FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
+#define	FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
+#define	FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
+#define	FSE_AZ_RX_EV_PKT_TYPE_LLC 1
+#define	FSE_AZ_RX_EV_PKT_TYPE_ETH 0
+#define	FSF_AZ_RX_EV_HDR_TYPE_LBN 42
+#define	FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
+#define	FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
+#define	FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
+#define	FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
+#define	FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
+#define	FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define	FSF_AZ_RX_EV_MCAST_PKT_LBN 39
+#define	FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
+#define	FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
+#define	FSF_AZ_RX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
+#define	FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
+#define	FSF_AZ_RX_EV_PORT_LBN 30
+#define	FSF_AZ_RX_EV_PORT_WIDTH 1
+#define	FSF_AZ_RX_EV_BYTE_CNT_LBN 16
+#define	FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
+#define	FSF_AZ_RX_EV_SOP_LBN 15
+#define	FSF_AZ_RX_EV_SOP_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
+#define	FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
+#define	FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
+#define	FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
+#define	FSF_AZ_RX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
+
+/* RX_KER_DESC */
+#define	FSF_AZ_RX_KER_BUF_SIZE_LBN 48
+#define	FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
+#define	FSF_AZ_RX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_RX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
+
+/* RX_USER_DESC */
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
+#define	FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
+#define	FSF_AZ_RX_USER_BUF_ID_LBN 0
+#define	FSF_AZ_RX_USER_BUF_ID_WIDTH 20
+
+/* TX_EV */
+#define	FSF_AZ_TX_EV_PKT_ERR_LBN 38
+#define	FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
+#define	FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
+#define	FSF_AZ_TX_EV_Q_LABEL_LBN 32
+#define	FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
+#define	FSF_AZ_TX_EV_PORT_LBN 16
+#define	FSF_AZ_TX_EV_PORT_WIDTH 1
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
+#define	FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
+#define	FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define	FSF_AZ_TX_EV_COMP_LBN 12
+#define	FSF_AZ_TX_EV_COMP_WIDTH 1
+#define	FSF_AZ_TX_EV_DESC_PTR_LBN 0
+#define	FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
+
+/* TX_KER_DESC */
+#define	FSF_AZ_TX_KER_CONT_LBN 62
+#define	FSF_AZ_TX_KER_CONT_WIDTH 1
+#define	FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
+#define	FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
+#define	FSF_AZ_TX_KER_BUF_REGION_LBN 46
+#define	FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
+#define	FSF_AZ_TX_KER_BUF_ADDR_LBN 0
+#define	FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
+
+/* TX_USER_DESC */
+#define	FSF_AZ_TX_USER_SW_EV_EN_LBN 48
+#define	FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
+#define	FSF_AZ_TX_USER_CONT_LBN 46
+#define	FSF_AZ_TX_USER_CONT_WIDTH 1
+#define	FSF_AZ_TX_USER_BYTE_CNT_LBN 33
+#define	FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
+#define	FSF_AZ_TX_USER_BUF_ID_LBN 13
+#define	FSF_AZ_TX_USER_BUF_ID_WIDTH 20
+#define	FSF_AZ_TX_USER_BYTE_OFS_LBN 0
+#define	FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
+
+/* USER_EV */
+#define	FSF_CZ_USER_QID_LBN 32
+#define	FSF_CZ_USER_QID_WIDTH 10
+#define	FSF_CZ_USER_EV_REG_VALUE_LBN 0
+#define	FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon B0 PCIe core indirect registers
+ *
+ **************************************************************************
+ */
+
+#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
+
+#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
+
+#define FPCR_BB_ACK_RPL_TIMER 0x700
+#define FPCRF_BB_ACK_TL_LBN 0
+#define FPCRF_BB_ACK_TL_WIDTH 16
+#define FPCRF_BB_RPL_TL_LBN 16
+#define FPCRF_BB_RPL_TL_WIDTH 16
+
+#define FPCR_BB_ACK_FREQ 0x70C
+#define FPCRF_BB_ACK_FREQ_LBN 0
+#define FPCRF_BB_ACK_FREQ_WIDTH 7
+
+/**************************************************************************
+ *
+ * Pseudo-registers and fields
+ *
+ **************************************************************************
+ */
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
+
+/* EE_SPI_HCMD_REG: SPI host command register */
+/* Values for the EE_SPI_HCMD_SF_SEL register field */
+#define FFE_AB_SPI_DEVICE_EEPROM 0
+#define FFE_AB_SPI_DEVICE_FLASH 1
+
+/* NIC_STAT_REG: NIC status register */
+#define FRF_AB_STRAP_10G_LBN 2
+#define FRF_AB_STRAP_10G_WIDTH 1
+#define FRF_AA_STRAP_PCIE_LBN 0
+#define FRF_AA_STRAP_PCIE_WIDTH 1
+
+/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
+#define FRF_AZ_FATAL_INTR_LBN 0
+#define FRF_AZ_FATAL_INTR_WIDTH 12
+
+/* SRM_CFG_REG: SRAM configuration register */
+/* We treat the number of SRAM banks and bank size as a single field */
+#define	FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
+#define	FRF_AZ_SRM_NB_SZ_WIDTH \
+	(FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
+#define FFE_AB_SRM_NB1_SZ2M 0
+#define FFE_AB_SRM_NB1_SZ4M 1
+#define FFE_AB_SRM_NB1_SZ8M 2
+#define FFE_AB_SRM_NB_SZ_DEF 3
+#define FFE_AB_SRM_NB2_SZ4M 4
+#define FFE_AB_SRM_NB2_SZ8M 5
+#define FFE_AB_SRM_NB2_SZ16M 6
+#define FFE_AB_SRM_NB_SZ_RES 7
+
+/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
+/* We write just the last dword of these registers */
+#define	FR_AZ_RX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
+	 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
+
+/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
+#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
+	(BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
+	 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
+#define	FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
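+
+/* Illustrative sketch: the DWORD pseudo-registers above exist so that a
+ * descriptor doorbell can be rung by writing only the final dword, as
+ * described in io.h.  Assuming the EFX_POPULATE_DWORD_1() helper from
+ * bitfield.h, a TX doorbell looks roughly like:
+ *
+ *	efx_dword_t reg;
+ *
+ *	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ *	efx_writed(efx, &reg, FR_AZ_TX_DESC_UPD_DWORD_P0 + queue_page_offset);
+ *
+ * where write_ptr and queue_page_offset are placeholders for the queue's
+ * software write pointer and per-queue page offset.
+ */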
+
+/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
+
+/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
+#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
+
+/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
+
+/* XM_RX_PARAM_REG: XGMAC receive parameter register */
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
+#define	FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
+					 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
+
+/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
+/* Default values */
+#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
+#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
+#define FFE_AB_XX_SD_CTL_DRV_DEF 0  /* 20mA */
+
+/* XX_CORE_STAT_REG: XAUI XGXS core status register */
+/* XGXS all-lanes status fields */
+#define	FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
+#define	FRF_AB_XX_SYNC_STAT_WIDTH 4
+#define	FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
+#define	FRF_AB_XX_COMMA_DET_WIDTH 4
+#define	FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
+#define	FRF_AB_XX_CHAR_ERR_WIDTH 4
+#define	FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
+#define	FRF_AB_XX_DISPERR_WIDTH 4
+#define	FFE_AB_XX_STAT_ALL_LANES 0xf
+#define	FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
+#define	FRF_AB_XX_FORCE_SIG_WIDTH 8
+#define	FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
+
+/* RX_MAC_FILTER_TBL0 */
+/* RMFT_DEST_MAC is wider than 32 bits */
+#define FRF_CZ_RMFT_DEST_MAC_LO_LBN FRF_CZ_RMFT_DEST_MAC_LBN
+#define FRF_CZ_RMFT_DEST_MAC_LO_WIDTH 32
+#define FRF_CZ_RMFT_DEST_MAC_HI_LBN (FRF_CZ_RMFT_DEST_MAC_LBN + 32)
+#define FRF_CZ_RMFT_DEST_MAC_HI_WIDTH (FRF_CZ_RMFT_DEST_MAC_WIDTH - 32)
+
+/* TX_MAC_FILTER_TBL0 */
+/* TMFT_SRC_MAC is wider than 32 bits */
+#define FRF_CZ_TMFT_SRC_MAC_LO_LBN FRF_CZ_TMFT_SRC_MAC_LBN
+#define FRF_CZ_TMFT_SRC_MAC_LO_WIDTH 32
+#define FRF_CZ_TMFT_SRC_MAC_HI_LBN (FRF_CZ_TMFT_SRC_MAC_LBN + 32)
+#define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH (FRF_CZ_TMFT_SRC_MAC_WIDTH - 32)
+
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
+
+/* DRIVER_EV */
+/* Sub-fields of an RX flush completion event */
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+
+/* EVENT_ENTRY */
+/* Magic number field for event test */
+#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
+#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
+
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
+
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/siena/filter.h b/drivers/net/ethernet/sfc/siena/filter.h
new file mode 100644
index 000000000000..40b2af8bfb81
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/filter.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_FILTER_H
+#define EFX_FILTER_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
+
+/**
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ *	Used for RX default unicast and multicast/broadcast filters.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
+ *
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ *   local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ *   or local 2-tuple, or local MAC with or without outer VID, and RX
+ *   default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ *   using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ *   with or without outer and inner VID
+ */
+enum efx_filter_match_flags {
+	EFX_FILTER_MATCH_REM_HOST =	0x0001,
+	EFX_FILTER_MATCH_LOC_HOST =	0x0002,
+	EFX_FILTER_MATCH_REM_MAC =	0x0004,
+	EFX_FILTER_MATCH_REM_PORT =	0x0008,
+	EFX_FILTER_MATCH_LOC_MAC =	0x0010,
+	EFX_FILTER_MATCH_LOC_PORT =	0x0020,
+	EFX_FILTER_MATCH_ETHER_TYPE =	0x0040,
+	EFX_FILTER_MATCH_INNER_VID =	0x0080,
+	EFX_FILTER_MATCH_OUTER_VID =	0x0100,
+	EFX_FILTER_MATCH_IP_PROTO =	0x0200,
+	EFX_FILTER_MATCH_LOC_MAC_IG =	0x0400,
+	EFX_FILTER_MATCH_ENCAP_TYPE =	0x0800,
+};
+
+/**
+ * enum efx_filter_priority - priority of a hardware filter specification
+ * @EFX_FILTER_PRI_HINT: Performance hint
+ * @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
+ *	or hardware requirements.  This may only be used by the filter
+ *	implementation for each NIC type.
+ * @EFX_FILTER_PRI_MANUAL: Manually configured filter
+ * @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
+ *	networking and SR-IOV)
+ */
+enum efx_filter_priority {
+	EFX_FILTER_PRI_HINT = 0,
+	EFX_FILTER_PRI_AUTO,
+	EFX_FILTER_PRI_MANUAL,
+	EFX_FILTER_PRI_REQUIRED,
+};
+
+/**
+ * enum efx_filter_flags - flags for hardware filter specifications
+ * @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
+ *	By default, matching packets will be delivered only to the
+ *	specified queue. If this flag is set, they will be delivered
+ *	to a range of queues offset from the specified queue number
+ *	according to the indirection table.
+ * @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
+ *	queue.
+ * @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
+ *	overriding an automatic filter (priority
+ *	%EFX_FILTER_PRI_AUTO).  This may only be set by the filter
+ *	implementation for each type.  A removal request will restore
+ *	the automatic filter in its place.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
+ * @EFX_FILTER_FLAG_TX: Filter is for TX
+ */
+enum efx_filter_flags {
+	EFX_FILTER_FLAG_RX_RSS = 0x01,
+	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+	EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
+	EFX_FILTER_FLAG_RX = 0x08,
+	EFX_FILTER_FLAG_TX = 0x10,
+};
+
+/**
+ * enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+	EFX_ENCAP_TYPE_NONE = 0,
+	EFX_ENCAP_TYPE_VXLAN = 1,
+	EFX_ENCAP_TYPE_NVGRE = 2,
+	EFX_ENCAP_TYPE_GENEVE = 3,
+
+	EFX_ENCAP_TYPES_MASK = 7,
+	EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
+/**
+ * struct efx_filter_spec - specification for a hardware filter
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
+ * @priority: Priority of the filter, from &enum efx_filter_priority
+ * @flags: Miscellaneous flags, from &enum efx_filter_flags
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set.  This
+ *	is a user_id (with 0 meaning the driver/default RSS context), not an
+ *	MCFW context_id.
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ *	an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ *	%EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ *	is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ *	%EFX_FILTER_MATCH_ENCAP_TYPE is set
+ *
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure.  The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one.  The hardware priority of a filter
+ * depends on which fields are matched.
+ */
+struct efx_filter_spec {
+	u32	match_flags:12;
+	u32	priority:2;
+	u32	flags:6;
+	u32	dmaq_id:12;
+	u32	rss_context;
+	__be16	outer_vid __aligned(4); /* allow jhash2() of match values */
+	__be16	inner_vid;
+	u8	loc_mac[ETH_ALEN];
+	u8	rem_mac[ETH_ALEN];
+	__be16	ether_type;
+	u8	ip_proto;
+	__be32	loc_host[4];
+	__be32	rem_host[4];
+	__be16	loc_port;
+	__be16	rem_port;
+	u32     encap_type:4;
+	/* total 65 bytes */
+};
+
+enum {
+	EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
+};
+
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+				      enum efx_filter_priority priority,
+				      enum efx_filter_flags flags,
+				      unsigned rxq_id)
+{
+	memset(spec, 0, sizeof(*spec));
+	spec->priority = priority;
+	spec->flags = EFX_FILTER_FLAG_RX | flags;
+	spec->rss_context = 0;
+	spec->dmaq_id = rxq_id;
+}
+
+static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
+				      unsigned txq_id)
+{
+	memset(spec, 0, sizeof(*spec));
+	spec->priority = EFX_FILTER_PRI_REQUIRED;
+	spec->flags = EFX_FILTER_FLAG_TX;
+	spec->dmaq_id = txq_id;
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+			  __be32 host, __be16 port)
+{
+	spec->match_flags |=
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+	spec->ether_type = htons(ETH_P_IP);
+	spec->ip_proto = proto;
+	spec->loc_host[0] = host;
+	spec->loc_port = port;
+	return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+			 __be32 lhost, __be16 lport,
+			 __be32 rhost, __be16 rport)
+{
+	spec->match_flags |=
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+	spec->ether_type = htons(ETH_P_IP);
+	spec->ip_proto = proto;
+	spec->loc_host[0] = lhost;
+	spec->loc_port = lport;
+	spec->rem_host[0] = rhost;
+	spec->rem_port = rport;
+	return 0;
+}
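+
+/* Usage sketch: a typical RX steering filter is built by initialising the
+ * spec and then applying one of the efx_filter_set_*() helpers, e.g. to
+ * steer TCP traffic for a local address/port to a given RX queue:
+ *
+ *	struct efx_filter_spec spec;
+ *
+ *	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ *			   EFX_FILTER_FLAG_RX_SCATTER, rxq_index);
+ *	efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, ip_addr, port);
+ *
+ * rxq_index, ip_addr (__be32, network byte order) and port (__be16, network
+ * byte order) are placeholders; IPPROTO_TCP comes from the usual kernel
+ * headers.
+ */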
+
+enum {
+	EFX_FILTER_VID_UNSPEC = 0xffff,
+};
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+					   u16 vid, const u8 *addr)
+{
+	if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+		return -EINVAL;
+
+	if (vid != EFX_FILTER_VID_UNSPEC) {
+		spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+		spec->outer_vid = htons(vid);
+	}
+	if (addr != NULL) {
+		spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+		ether_addr_copy(spec->loc_mac, addr);
+	}
+	return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+	spec->loc_mac[0] = 1;
+	return 0;
+}
+
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+					     enum efx_encap_type encap_type)
+{
+	spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+	spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+		const struct efx_filter_spec *spec)
+{
+	if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+		return spec->encap_type;
+	return EFX_ENCAP_TYPE_NONE;
+}
+#endif /* EFX_FILTER_H */
diff --git a/drivers/net/ethernet/sfc/siena/io.h b/drivers/net/ethernet/sfc/siena/io.h
new file mode 100644
index 000000000000..30439cc83a89
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/io.h
@@ -0,0 +1,310 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_IO_H
+#define EFX_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+/**************************************************************************
+ *
+ * NIC register I/O
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy for the Falcon architecture:
+ *
+ * Many CSRs are very wide and cannot be read or written atomically.
+ * Writes from the host are buffered by the Bus Interface Unit (BIU)
+ * up to 128 bits.  Whenever the host writes part of such a register,
+ * the BIU collects the written value and does not write to the
+ * underlying register until all 4 dwords have been written.  A
+ * similar buffering scheme applies to host access to the NIC's 64-bit
+ * SRAM.
+ *
+ * Writes to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes.  We use
+ * efx_nic::biu_lock for this.
+ *
+ * We also serialise reads from 128-bit CSRs and SRAM with the same
+ * spinlock.  This may not be necessary, but it doesn't really matter
+ * as there are no such reads on the fast path.
+ *
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
+ *
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ *   replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ *   (i.e. the high 32 bits) the underlying register will always be
+ *   written.  If the collector and the current write together do not
+ *   provide values for all 128 bits of the register, the low 96 bits
+ *   will be written as zero.
+ * - If the host writes to the address of any other part of such a
+ *   register while the collector already holds values for some other
+ *   register, the write is discarded and the collector maintains its
+ *   current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide.  The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
+ */
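+
+/* Illustrative sketch of the two write paths described above (the helper
+ * functions themselves are defined later in this file).  An ordinary wide
+ * CSR write goes through the BIU collector and must be serialised, so
+ * efx_writeo() takes efx->biu_lock:
+ *
+ *	efx_oword_t reg;
+ *
+ *	EFX_POPULATE_OWORD_1(reg, FRF_AZ_RX_DESCQ_EN, 1);
+ *	efx_writeo(efx, &reg, FR_BZ_RX_DESC_PTR_TBL);
+ *
+ * A descriptor pointer update, by contrast, only needs its final dword and
+ * can be posted lock-free with efx_writed() (see the DWORD pseudo-registers
+ * in farch_regs.h).  EFX_POPULATE_OWORD_1() is assumed to come from
+ * bitfield.h, and populating only one field of RX_DESC_PTR_TBL is for
+ * illustration only.
+ */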
+
+#if BITS_PER_LONG == 64
+#define EFX_USE_QWORD_IO 1
+#endif
+
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict this to
+ * the x86_64 arch, but it is done for safety since unusual write-combining
+ * behaviour can break PIO.
+ */
+#ifdef CONFIG_X86_64
+/* PIO is a win only if write-combining is possible */
+#ifdef ARCH_HAS_IOREMAP_WC
+#define EFX_USE_PIO 1
+#endif
+#endif
+
+static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
+{
+	return efx->reg_base + reg;
+}
+
+#ifdef EFX_USE_QWORD_IO
+static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
+				  unsigned int reg)
+{
+	__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+
+static inline void _efx_writed(struct efx_nic *efx, __le32 value,
+				  unsigned int reg)
+{
+	__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
+{
+	return (__force __le32)__raw_readl(efx->membase + reg);
+}
+
+/* Write a normal 128-bit CSR, locking as appropriate. */
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
+			      unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
+				   const efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing SRAM address %x with " EFX_QWORD_FMT "\n",
+		   addr, EFX_QWORD_VAL(*value));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	__raw_writeq((__force u64)value->u64[0], membase + addr);
+#else
+	__raw_writel((__force u32)value->u32[0], membase + addr);
+	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
+			      unsigned int reg)
+{
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with "EFX_DWORD_FMT"\n",
+		   reg, EFX_DWORD_VAL(*value));
+
+	/* No lock required */
+	_efx_writed(efx, value->u32[0], reg);
+}
+
+/* Read a 128-bit CSR, locking as appropriate. */
+static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
+			     unsigned int reg)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+	value->u32[0] = _efx_readd(efx, reg + 0);
+	value->u32[1] = _efx_readd(efx, reg + 4);
+	value->u32[2] = _efx_readd(efx, reg + 8);
+	value->u32[3] = _efx_readd(efx, reg + 12);
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+}
+
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
+static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
+				  efx_qword_t *value, unsigned int index)
+{
+	unsigned int addr = index * sizeof(*value);
+	unsigned long flags __attribute__ ((unused));
+
+	spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef EFX_USE_QWORD_IO
+	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
+#else
+	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
+#endif
+	spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
+		   addr, EFX_QWORD_VAL(*value));
+}
+
+/* Read a 32-bit CSR or SRAM */
+static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
+				unsigned int reg)
+{
+	value->u32[0] = _efx_readd(efx, reg);
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "read from register %x, got "EFX_DWORD_FMT"\n",
+		   reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write a 128-bit CSR forming part of a table */
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+		 unsigned int reg, unsigned int index)
+{
+	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read a 128-bit CSR forming part of a table */
+static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
+				     unsigned int reg, unsigned int index)
+{
+	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* default VI stride (step between per-VI registers) is 8K on EF10 and
+ * 64K on EF100
+ */
+#define EFX_DEFAULT_VI_STRIDE		0x2000
+#define EF100_DEFAULT_VI_STRIDE		0x10000
+
+/* Calculate offset to page-mapped register */
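+/* (e.g. with the default EF10 VI stride of 0x2000, page 3 of a register
+ * at offset 0x400 maps to 3 * 0x2000 + 0x400 = 0x6400)
+ */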
+static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
+					 unsigned int reg)
+{
+	return page * efx->vi_stride + reg;
+}
+
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+				    unsigned int reg, unsigned int page)
+{
+	reg = efx_paged_reg(efx, page, reg);
+
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+	_efx_writeq(efx, value->u64[1], reg + 8);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+	_efx_writed(efx, value->u32[2], reg + 8);
+	_efx_writed(efx, value->u32[3], reg + 12);
+#endif
+}
+#define efx_writeo_page(efx, value, reg, page)				\
+	_efx_writeo_page(efx, value,					\
+			 reg +						\
+			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+			 page)
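+/* (The BUILD_BUG_ON_ZERO() term evaluates to 0 for the permitted register
+ * offsets and to a compile-time error otherwise, so passing any other
+ * register to efx_writeo_page() is caught at build time.)
+ */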
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+		 unsigned int reg, unsigned int page)
+{
+	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+}
+#define efx_writed_page(efx, value, reg, page)				\
+	_efx_writed_page(efx, value,					\
+			 reg +						\
+			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
+					   (reg) != 0x200 &&		\
+					   (reg) != 0x400 &&		\
+					   (reg) != 0x420 &&		\
+					   (reg) != 0x830 &&		\
+					   (reg) != 0x83c &&		\
+					   (reg) != 0xa18 &&		\
+					   (reg) != 0xa1c),		\
+			 page)
+
+/* Write TIMER_COMMAND.  This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+					   const efx_dword_t *value,
+					   unsigned int reg,
+					   unsigned int page)
+{
+	unsigned long flags __attribute__ ((unused));
+
+	if (page == 0) {
+		spin_lock_irqsave(&efx->biu_lock, flags);
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+		spin_unlock_irqrestore(&efx->biu_lock, flags);
+	} else {
+		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
+	}
+}
+#define efx_writed_page_locked(efx, value, reg, page)			\
+	_efx_writed_page_locked(efx, value,				\
+				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+				page)
+
+#endif /* EFX_IO_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.c b/drivers/net/ethernet/sfc/siena/mcdi.c
new file mode 100644
index 000000000000..50baf62b2cbc
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.c
@@ -0,0 +1,2375 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/moduleparam.h>
+#include <linux/atomic.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Management-Controller-to-Driver Interface
+ *
+ **************************************************************************
+ */
+
+#define MCDI_RPC_TIMEOUT       (10 * HZ)
+
+/* A reboot/assertion causes the MCDI status word to be set after the
+ * command word is set or a REBOOT event is sent. If we notice a reboot
+ * via these mechanisms then wait 250ms for the status word to be set.
+ */
+#define MCDI_STATUS_DELAY_US		100
+#define MCDI_STATUS_DELAY_COUNT		2500
+#define MCDI_STATUS_SLEEP_MS						\
+	(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
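+/* (i.e. 100us * 2500 / 1000 = 250ms, matching the wait described above) */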
+
+#define SEQ_MASK							\
+	EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
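+/* (a mask as wide as the MCDI_HEADER_SEQ field, so that comparisons such
+ * as "(seqno ^ mcdi->seqno) & SEQ_MASK" only look at the bits actually
+ * carried in the header)
+ */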
+
+struct efx_mcdi_async_param {
+	struct list_head list;
+	unsigned int cmd;
+	size_t inlen;
+	size_t outlen;
+	bool quiet;
+	efx_mcdi_async_completer *complete;
+	unsigned long cookie;
+	/* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(struct timer_list *t);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+			       bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
+static void efx_mcdi_abandon(struct efx_nic *efx);
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+		 "Enable MCDI logging on newly-probed functions");
+#endif
+
+int efx_mcdi_init(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+	bool already_attached;
+	int rc = -ENOMEM;
+
+	efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+	if (!efx->mcdi)
+		goto fail;
+
+	mcdi = efx_mcdi(efx);
+	mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	/* consuming code assumes buffer is page-sized */
+	mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+	if (!mcdi->logging_buffer)
+		goto fail1;
+	mcdi->logging_enabled = mcdi_logging_default;
+#endif
+	init_waitqueue_head(&mcdi->wq);
+	init_waitqueue_head(&mcdi->proxy_rx_wq);
+	spin_lock_init(&mcdi->iface_lock);
+	mcdi->state = MCDI_STATE_QUIESCENT;
+	mcdi->mode = MCDI_MODE_POLL;
+	spin_lock_init(&mcdi->async_lock);
+	INIT_LIST_HEAD(&mcdi->async_list);
+	timer_setup(&mcdi->async_timer, efx_mcdi_timeout_async, 0);
+
+	(void) efx_mcdi_poll_reboot(efx);
+	mcdi->new_epoch = true;
+
+	/* Recover from a failed assertion before probing */
+	rc = efx_mcdi_handle_assertion(efx);
+	if (rc)
+		goto fail2;
+
+	/* Let the MC (and BMC, if this is a LOM) know that the driver
+	 * is loaded. We should do this before we reset the NIC.
+	 */
+	rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+	if (rc) {
+		netif_err(efx, probe, efx->net_dev,
+			  "Unable to register driver with MCPU\n");
+		goto fail2;
+	}
+	if (already_attached)
+		/* Not a fatal error */
+		netif_err(efx, probe, efx->net_dev,
+			  "Host already registered with MCPU\n");
+
+	if (efx->mcdi->fn_flags &
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+		efx->primary = efx;
+
+	return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+	kfree(efx->mcdi);
+	efx->mcdi = NULL;
+fail:
+	return rc;
+}
+
+void efx_mcdi_detach(struct efx_nic *efx)
+{
+	if (!efx->mcdi)
+		return;
+
+	BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+	/* Relinquish the device (back to the BMC, if this is a LOM) */
+	efx_mcdi_drv_attach(efx, false, NULL);
+}
+
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+	if (!efx->mcdi)
+		return;
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
+	kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+				  const efx_dword_t *inbuf, size_t inlen)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
+	efx_dword_t hdr[2];
+	size_t hdr_len;
+	u32 xflags, seqno;
+
+	BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+	/* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+	spin_lock_bh(&mcdi->iface_lock);
+	++mcdi->seqno;
+	seqno = mcdi->seqno & SEQ_MASK;
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	xflags = 0;
+	if (mcdi->mode == MCDI_MODE_EVENTS)
+		xflags |= MCDI_HEADER_XFLAGS_EVREQ;
+
+	if (efx->type->mcdi_max_ver == 1) {
+		/* MCDI v1 */
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, cmd,
+				     MCDI_HEADER_DATALEN, inlen,
+				     MCDI_HEADER_SEQ, seqno,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		hdr_len = 4;
+	} else {
+		/* MCDI v2 */
+		BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+		EFX_POPULATE_DWORD_7(hdr[0],
+				     MCDI_HEADER_RESPONSE, 0,
+				     MCDI_HEADER_RESYNC, 1,
+				     MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+				     MCDI_HEADER_DATALEN, 0,
+				     MCDI_HEADER_SEQ, seqno,
+				     MCDI_HEADER_XFLAGS, xflags,
+				     MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+		EFX_POPULATE_DWORD_2(hdr[1],
+				     MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+				     MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+		hdr_len = 8;
+	}
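+	/* hdr[] now holds either the single v1 header dword (command,
+	 * length, sequence number and flags packed into 32 bits) or the
+	 * two-dword v2 form, where the real command and length are carried
+	 * in the extension word because they no longer fit the v1 fields.
+	 */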
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+		int bytes = 0;
+		int i;
+		/* Lengths should always be a whole number of dwords, so scream
+		 * if they're not.
+		 */
+		WARN_ON_ONCE(hdr_len % 4);
+		WARN_ON_ONCE(inlen % 4);
+
+		/* We own the logging buffer, as only one MCDI can be in
+		 * progress on a NIC at any one time.  So no need for locking.
+		 */
+		for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x",
+					   le32_to_cpu(hdr[i].u32[0]));
+
+		for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x",
+					   le32_to_cpu(inbuf[i].u32[0]));
+
+		netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+	}
+#endif
+
+	efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
+
+	mcdi->new_epoch = false;
+}
+
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+	switch (mcdi_err) {
+	case 0:
+		return 0;
+#define TRANSLATE_ERROR(name)					\
+	case MC_CMD_ERR_ ## name:				\
+		return -name;
+	TRANSLATE_ERROR(EPERM);
+	TRANSLATE_ERROR(ENOENT);
+	TRANSLATE_ERROR(EINTR);
+	TRANSLATE_ERROR(EAGAIN);
+	TRANSLATE_ERROR(EACCES);
+	TRANSLATE_ERROR(EBUSY);
+	TRANSLATE_ERROR(EINVAL);
+	TRANSLATE_ERROR(EDEADLK);
+	TRANSLATE_ERROR(ENOSYS);
+	TRANSLATE_ERROR(ETIME);
+	TRANSLATE_ERROR(EALREADY);
+	TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+	case MC_CMD_ERR_ENOTSUP:
+		return -EOPNOTSUPP;
+	case MC_CMD_ERR_ALLOC_FAIL:
+		return -ENOBUFS;
+	case MC_CMD_ERR_MAC_EXIST:
+		return -EADDRINUSE;
+	default:
+		return -EPROTO;
+	}
+}
+
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
+	efx_dword_t hdr;
+
+	efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+	respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+	respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+	error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
+
+	if (respcmd != MC_CMD_V2_EXTN) {
+		mcdi->resp_hdr_len = 4;
+		mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+	} else {
+		efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+		mcdi->resp_hdr_len = 8;
+		mcdi->resp_data_len =
+			EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+	}
+
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+		size_t hdr_len, data_len;
+		int bytes = 0;
+		int i;
+
+		WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+		hdr_len = mcdi->resp_hdr_len / 4;
+		/* MCDI_DECLARE_BUF ensures that underlying buffer is padded
+		 * to dword size, and the MCDI buffer is always dword size
+		 */
+		data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+		/* We own the logging buffer, as only one MCDI can be in
+		 * progress on a NIC at any one time.  So no need for locking.
+		 */
+		for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+			efx->type->mcdi_read_response(efx, &hdr,
+					mcdi->resp_hdr_len + (i * 4), 4);
+			bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
+					   " %08x", le32_to_cpu(hdr.u32[0]));
+		}
+
+		netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+	}
+#endif
+
+	mcdi->resprc_raw = 0;
+	if (error && mcdi->resp_data_len == 0) {
+		netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+		mcdi->resprc = -EIO;
+	} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+			  respseq, mcdi->seqno);
+		mcdi->resprc = -EIO;
+	} else if (error) {
+		efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+		mcdi->resprc_raw = EFX_DWORD_FIELD(hdr, EFX_DWORD_0);
+		mcdi->resprc = efx_mcdi_errno(mcdi->resprc_raw);
+	} else {
+		mcdi->resprc = 0;
+	}
+}
+
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	rmb();
+	if (!efx->type->mcdi_poll_response(efx))
+		return false;
+
+	spin_lock_bh(&mcdi->iface_lock);
+	efx_mcdi_read_response_header(efx);
+	spin_unlock_bh(&mcdi->iface_lock);
+
+	return true;
+}
+
+static int efx_mcdi_poll(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	unsigned long time, finish;
+	unsigned int spins;
+	int rc;
+
+	/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
+	rc = efx_mcdi_poll_reboot(efx);
+	if (rc) {
+		spin_lock_bh(&mcdi->iface_lock);
+		mcdi->resprc = rc;
+		mcdi->resp_hdr_len = 0;
+		mcdi->resp_data_len = 0;
+		spin_unlock_bh(&mcdi->iface_lock);
+		return 0;
+	}
+
+	/* Poll for completion. Poll quickly (once per microsecond) for the
+	 * first jiffy, because MCDI responses are generally fast. After
+	 * that, back off and poll approximately once per jiffy.
+	 */
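+	/* (USER_TICK_USEC is typically 10000, i.e. USEC_PER_SEC / USER_HZ
+	 * with USER_HZ == 100, so the busy-wait phase below lasts roughly
+	 * 10ms of 1us delays before we fall back to sleeping.)
+	 */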
+	spins = USER_TICK_USEC;
+	finish = jiffies + MCDI_RPC_TIMEOUT;
+
+	while (1) {
+		if (spins != 0) {
+			--spins;
+			udelay(1);
+		} else {
+			schedule_timeout_uninterruptible(1);
+		}
+
+		time = jiffies;
+
+		if (efx_mcdi_poll_once(efx))
+			break;
+
+		if (time_after(time, finish))
+			return -ETIMEDOUT;
+	}
+
+	/* Return rc=0 like wait_event_timeout() */
+	return 0;
+}
+
+/* Test and clear MC-rebooted flag for this port/function; reset
+ * software state as necessary.
+ */
+int efx_mcdi_poll_reboot(struct efx_nic *efx)
+{
+	if (!efx->mcdi)
+		return 0;
+
+	return efx->type->mcdi_poll_reboot(efx);
+}
+
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+	return cmpxchg(&mcdi->state,
+		       MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+		MCDI_STATE_QUIESCENT;
+}
+
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
+{
+	/* Wait until the interface becomes QUIESCENT and we win the race
+	 * to mark it RUNNING_SYNC.
+	 */
+	wait_event(mcdi->wq,
+		   cmpxchg(&mcdi->state,
+			   MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+		   MCDI_STATE_QUIESCENT);
+}
+
+static int efx_mcdi_await_completion(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+			       MCDI_RPC_TIMEOUT) == 0)
+		return -ETIMEDOUT;
+
+	/* Check if efx_mcdi_mode_poll() switched us back to polled
+	 * completions, in which case poll for completions directly. If
+	 * efx_mcdi_ev_cpl() completed the request first, then we'll just
+	 * end up completing the request again, which is safe.
+	 *
+	 * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which
+	 * wait_event_timeout() implicitly provides.
+	 */
+	if (mcdi->mode == MCDI_MODE_POLL)
+		return efx_mcdi_poll(efx);
+
+	return 0;
+}
+
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester.  Return whether this was done.  Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
+{
+	if (cmpxchg(&mcdi->state,
+		    MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+	    MCDI_STATE_RUNNING_SYNC) {
+		wake_up(&mcdi->wq);
+		return true;
+	}
+
+	return false;
+}
+
+static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
+{
+	if (mcdi->mode == MCDI_MODE_EVENTS) {
+		struct efx_mcdi_async_param *async;
+		struct efx_nic *efx = mcdi->efx;
+
+		/* Process the asynchronous request queue */
+		spin_lock_bh(&mcdi->async_lock);
+		async = list_first_entry_or_null(
+			&mcdi->async_list, struct efx_mcdi_async_param, list);
+		if (async) {
+			mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+			efx_mcdi_send_request(efx, async->cmd,
+					      (const efx_dword_t *)(async + 1),
+					      async->inlen);
+			mod_timer(&mcdi->async_timer,
+				  jiffies + MCDI_RPC_TIMEOUT);
+		}
+		spin_unlock_bh(&mcdi->async_lock);
+
+		if (async)
+			return;
+	}
+
+	mcdi->state = MCDI_STATE_QUIESCENT;
+	wake_up(&mcdi->wq);
+}
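+
+/* Taken together, the helpers above implement a small state machine:
+ * QUIESCENT -> RUNNING_SYNC or RUNNING_ASYNC (acquire) -> COMPLETED
+ * (complete) -> QUIESCENT (release); release may instead hand straight
+ * to RUNNING_ASYNC when another asynchronous request is queued, and
+ * _efx_mcdi_rpc_finish() can park the interface in PROXY_WAIT while
+ * waiting for proxy authorisation.
+ */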
+
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done.  Must be called in bh-disabled
+ * context.  Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+	struct efx_nic *efx = mcdi->efx;
+	struct efx_mcdi_async_param *async;
+	size_t hdr_len, data_len, err_len;
+	efx_dword_t *outbuf;
+	MCDI_DECLARE_BUF_ERR(errbuf);
+	int rc;
+
+	if (cmpxchg(&mcdi->state,
+		    MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+	    MCDI_STATE_RUNNING_ASYNC)
+		return false;
+
+	spin_lock(&mcdi->iface_lock);
+	if (timeout) {
+		/* Ensure that if the completion event arrives later,
+		 * the seqno check in efx_mcdi_ev_cpl() will fail
+		 */
+		++mcdi->seqno;
+		++mcdi->credits;
+		rc = -ETIMEDOUT;
+		hdr_len = 0;
+		data_len = 0;
+	} else {
+		rc = mcdi->resprc;
+		hdr_len = mcdi->resp_hdr_len;
+		data_len = mcdi->resp_data_len;
+	}
+	spin_unlock(&mcdi->iface_lock);
+
+	/* Stop the timer.  In case the timer function is running, we
+	 * must wait for it to return so that there is no possibility
+	 * of it aborting the next request.
+	 */
+	if (!timeout)
+		del_timer_sync(&mcdi->async_timer);
+
+	spin_lock(&mcdi->async_lock);
+	async = list_first_entry(&mcdi->async_list,
+				 struct efx_mcdi_async_param, list);
+	list_del(&async->list);
+	spin_unlock(&mcdi->async_lock);
+
+	outbuf = (efx_dword_t *)(async + 1);
+	efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+				      min(async->outlen, data_len));
+	if (!timeout && rc && !async->quiet) {
+		err_len = min(sizeof(errbuf), data_len);
+		efx->type->mcdi_read_response(efx, errbuf, hdr_len,
+					      sizeof(errbuf));
+		efx_mcdi_display_error(efx, async->cmd, async->inlen, errbuf,
+				       err_len, rc);
+	}
+
+	if (async->complete)
+		async->complete(efx, async->cookie, rc, outbuf,
+				min(async->outlen, data_len));
+	kfree(async);
+
+	efx_mcdi_release(mcdi);
+
+	return true;
+}
+
+static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
+			    unsigned int datalen, unsigned int mcdi_err)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	bool wake = false;
+
+	spin_lock(&mcdi->iface_lock);
+
+	if ((seqno ^ mcdi->seqno) & SEQ_MASK) {
+		if (mcdi->credits)
+			/* The request has been cancelled */
+			--mcdi->credits;
+		else
+			netif_err(efx, hw, efx->net_dev,
+				  "MC response mismatch tx seq 0x%x rx "
+				  "seq 0x%x\n", seqno, mcdi->seqno);
+	} else {
+		if (efx->type->mcdi_max_ver >= 2) {
+			/* MCDI v2 responses don't fit in an event */
+			efx_mcdi_read_response_header(efx);
+		} else {
+			mcdi->resprc = efx_mcdi_errno(mcdi_err);
+			mcdi->resp_hdr_len = 4;
+			mcdi->resp_data_len = datalen;
+		}
+
+		wake = true;
+	}
+
+	spin_unlock(&mcdi->iface_lock);
+
+	if (wake) {
+		if (!efx_mcdi_complete_async(mcdi, false))
+			(void) efx_mcdi_complete_sync(mcdi);
+
+		/* If the interface isn't RUNNING_ASYNC or
+		 * RUNNING_SYNC then we've received a duplicate
+		 * completion after we've already transitioned back to
+		 * QUIESCENT. [A subsequent invocation would increment
+		 * seqno, so would have failed the seqno check].
+		 */
+	}
+}
+
+static void efx_mcdi_timeout_async(struct timer_list *t)
+{
+	struct efx_mcdi_iface *mcdi = from_timer(mcdi, t, async_timer);
+
+	efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+	if (efx->type->mcdi_max_ver < 0 ||
+	     (efx->type->mcdi_max_ver < 2 &&
+	      cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+		return -EINVAL;
+
+	if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+	    (efx->type->mcdi_max_ver < 2 &&
+	     inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
+static bool efx_mcdi_get_proxy_handle(struct efx_nic *efx,
+				      size_t hdr_len, size_t data_len,
+				      u32 *proxy_handle)
+{
+	MCDI_DECLARE_BUF_ERR(testbuf);
+	const size_t buflen = sizeof(testbuf);
+
+	if (!proxy_handle || data_len < buflen)
+		return false;
+
+	efx->type->mcdi_read_response(efx, testbuf, hdr_len, buflen);
+	if (MCDI_DWORD(testbuf, ERR_CODE) == MC_CMD_ERR_PROXY_PENDING) {
+		*proxy_handle = MCDI_DWORD(testbuf, ERR_PROXY_PENDING_HANDLE);
+		return true;
+	}
+
+	return false;
+}
+
+static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
+				size_t inlen,
+				efx_dword_t *outbuf, size_t outlen,
+				size_t *outlen_actual, bool quiet,
+				u32 *proxy_handle, int *raw_rc)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	MCDI_DECLARE_BUF_ERR(errbuf);
+	int rc;
+
+	if (mcdi->mode == MCDI_MODE_POLL)
+		rc = efx_mcdi_poll(efx);
+	else
+		rc = efx_mcdi_await_completion(efx);
+
+	if (rc != 0) {
+		netif_err(efx, hw, efx->net_dev,
+			  "MC command 0x%x inlen %d mode %d timed out\n",
+			  cmd, (int)inlen, mcdi->mode);
+
+		if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+			netif_err(efx, hw, efx->net_dev,
+				  "MCDI request was completed without an event\n");
+			rc = 0;
+		}
+
+		efx_mcdi_abandon(efx);
+
+		/* Close the race with efx_mcdi_ev_cpl() executing just too late
+		 * and completing a request we've just cancelled, by ensuring
+		 * that the seqno check therein fails.
+		 */
+		spin_lock_bh(&mcdi->iface_lock);
+		++mcdi->seqno;
+		++mcdi->credits;
+		spin_unlock_bh(&mcdi->iface_lock);
+	}
+
+	if (proxy_handle)
+		*proxy_handle = 0;
+
+	if (rc != 0) {
+		if (outlen_actual)
+			*outlen_actual = 0;
+	} else {
+		size_t hdr_len, data_len, err_len;
+
+		/* At the very least we need a memory barrier here to ensure
+		 * we pick up changes from efx_mcdi_ev_cpl(). Protect against
+		 * a spurious efx_mcdi_ev_cpl() running concurrently by
+		 * acquiring the iface_lock. */
+		spin_lock_bh(&mcdi->iface_lock);
+		rc = mcdi->resprc;
+		if (raw_rc)
+			*raw_rc = mcdi->resprc_raw;
+		hdr_len = mcdi->resp_hdr_len;
+		data_len = mcdi->resp_data_len;
+		err_len = min(sizeof(errbuf), data_len);
+		spin_unlock_bh(&mcdi->iface_lock);
+
+		BUG_ON(rc > 0);
+
+		efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+					      min(outlen, data_len));
+		if (outlen_actual)
+			*outlen_actual = data_len;
+
+		efx->type->mcdi_read_response(efx, errbuf, hdr_len, err_len);
+
+		if (cmd == MC_CMD_REBOOT && rc == -EIO) {
+			/* Don't reset if MC_CMD_REBOOT returns EIO */
+		} else if (rc == -EIO || rc == -EINTR) {
+			netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
+			netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
+				  cmd, -rc);
+			if (efx->type->mcdi_reboot_detected)
+				efx->type->mcdi_reboot_detected(efx);
+			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+		} else if (proxy_handle && (rc == -EPROTO) &&
+			   efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
+						     proxy_handle)) {
+			mcdi->proxy_rx_status = 0;
+			mcdi->proxy_rx_handle = 0;
+			mcdi->state = MCDI_STATE_PROXY_WAIT;
+		} else if (rc && !quiet) {
+			efx_mcdi_display_error(efx, cmd, inlen, errbuf, err_len,
+					       rc);
+		}
+
+		if (rc == -EIO || rc == -EINTR) {
+			msleep(MCDI_STATUS_SLEEP_MS);
+			efx_mcdi_poll_reboot(efx);
+			mcdi->new_epoch = true;
+		}
+	}
+
+	if (!proxy_handle || !*proxy_handle)
+		efx_mcdi_release(mcdi);
+	return rc;
+}
+
+static void efx_mcdi_proxy_abort(struct efx_mcdi_iface *mcdi)
+{
+	if (mcdi->state == MCDI_STATE_PROXY_WAIT) {
+		/* Interrupt the proxy wait. */
+		mcdi->proxy_rx_status = -EINTR;
+		wake_up(&mcdi->proxy_rx_wq);
+	}
+}
+
+static void efx_mcdi_ev_proxy_response(struct efx_nic *efx,
+				       u32 handle, int status)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	WARN_ON(mcdi->state != MCDI_STATE_PROXY_WAIT);
+
+	mcdi->proxy_rx_status = efx_mcdi_errno(status);
+	/* Ensure the status is written before we update the handle, since the
+	 * latter is used to check if we've finished.
+	 */
+	wmb();
+	mcdi->proxy_rx_handle = handle;
+	wake_up(&mcdi->proxy_rx_wq);
+}
+
+static int efx_mcdi_proxy_wait(struct efx_nic *efx, u32 handle, bool quiet)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	int rc;
+
+	/* Wait for a proxy event, or timeout. */
+	rc = wait_event_timeout(mcdi->proxy_rx_wq,
+				mcdi->proxy_rx_handle != 0 ||
+				mcdi->proxy_rx_status == -EINTR,
+				MCDI_RPC_TIMEOUT);
+
+	if (rc <= 0) {
+		netif_dbg(efx, hw, efx->net_dev,
+			  "MCDI proxy timeout %d\n", handle);
+		return -ETIMEDOUT;
+	} else if (mcdi->proxy_rx_handle != handle) {
+		netif_warn(efx, hw, efx->net_dev,
+			   "MCDI proxy unexpected handle %d (expected %d)\n",
+			   mcdi->proxy_rx_handle, handle);
+		return -EINVAL;
+	}
+
+	return mcdi->proxy_rx_status;
+}
+
+static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
+			 const efx_dword_t *inbuf, size_t inlen,
+			 efx_dword_t *outbuf, size_t outlen,
+			 size_t *outlen_actual, bool quiet, int *raw_rc)
+{
+	u32 proxy_handle = 0; /* Zero is an invalid proxy handle. */
+	int rc;
+
+	if (inbuf && inlen && (inbuf == outbuf)) {
+		/* The input buffer can't be aliased with the output. */
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+	if (rc)
+		return rc;
+
+	rc = _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				  outlen_actual, quiet, &proxy_handle, raw_rc);
+
+	if (proxy_handle) {
+		/* Handle proxy authorisation. This allows approval of MCDI
+		 * operations to be delegated to the admin function, allowing
+		 * fine control over (e.g.) multicast subscriptions.
+		 */
+		struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+		netif_dbg(efx, hw, efx->net_dev,
+			  "MCDI waiting for proxy auth %d\n",
+			  proxy_handle);
+		rc = efx_mcdi_proxy_wait(efx, proxy_handle, quiet);
+
+		if (rc == 0) {
+			netif_dbg(efx, hw, efx->net_dev,
+				  "MCDI proxy retry %d\n", proxy_handle);
+
+			/* We now retry the original request. */
+			mcdi->state = MCDI_STATE_RUNNING_SYNC;
+			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+
+			rc = _efx_mcdi_rpc_finish(efx, cmd, inlen,
+						  outbuf, outlen, outlen_actual,
+						  quiet, NULL, raw_rc);
+		} else {
+			netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+				       "MC command 0x%x failed after proxy auth rc=%d\n",
+				       cmd, rc);
+
+			if (rc == -EINTR || rc == -EIO)
+				efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+			efx_mcdi_release(mcdi);
+		}
+	}
+
+	return rc;
+}
+
+static int _efx_mcdi_rpc_evb_retry(struct efx_nic *efx, unsigned cmd,
+				   const efx_dword_t *inbuf, size_t inlen,
+				   efx_dword_t *outbuf, size_t outlen,
+				   size_t *outlen_actual, bool quiet)
+{
+	int raw_rc = 0;
+	int rc;
+
+	rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+			   outbuf, outlen, outlen_actual, true, &raw_rc);
+
+	if ((rc == -EPROTO) && (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+	    efx->type->is_vf) {
+		/* If the EVB port isn't available within a VF this may
+		 * mean the PF is still bringing the switch up. We should
+		 * retry our request shortly.
+		 */
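+		/* (The delay below starts at 10ms and doubles while it is
+		 * under 100ms; the whole retry loop is bounded by
+		 * MCDI_RPC_TIMEOUT.)
+		 */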
+		unsigned long abort_time = jiffies + MCDI_RPC_TIMEOUT;
+		unsigned int delay_us = 10000;
+
+		netif_dbg(efx, hw, efx->net_dev,
+			  "%s: NO_EVB_PORT; will retry request\n",
+			  __func__);
+
+		do {
+			usleep_range(delay_us, delay_us + 10000);
+			rc = _efx_mcdi_rpc(efx, cmd, inbuf, inlen,
+					   outbuf, outlen, outlen_actual,
+					   true, &raw_rc);
+			if (delay_us < 100000)
+				delay_us <<= 1;
+		} while ((rc == -EPROTO) &&
+			 (raw_rc == MC_CMD_ERR_NO_EVB_PORT) &&
+			 time_before(jiffies, abort_time));
+	}
+
+	if (rc && !quiet && !(cmd == MC_CMD_REBOOT && rc == -EIO))
+		efx_mcdi_display_error(efx, cmd, inlen,
+				       outbuf, outlen, rc);
+
+	return rc;
+}
+
+/**
+ * efx_mcdi_rpc - Issue an MCDI command and wait for completion
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes.  Must be a multiple
+ *	of 4 and no greater than %MCDI_CTL_SDU_LEN_MAX_V1.
+ * @outbuf: Response buffer.  May be %NULL if @outlen is 0.
+ * @outlen: Length of response buffer, in bytes.  If the actual
+ *	response is longer than @outlen & ~3, it will be truncated
+ *	to that length.
+ * @outlen_actual: Pointer through which to return the actual response
+ *	length.  May be %NULL if this is not needed.
+ *
+ * This function may sleep and therefore must be called in an appropriate
+ * context.
+ *
+ * Return: A negative error code, or zero if successful.  The error
+ *	code may come from the MCDI response or may indicate a failure
+ *	to communicate with the MC.  In the former case, the response
+ *	will still be copied to @outbuf and *@outlen_actual will be
+ *	set accordingly.  In the latter case, *@outlen_actual will be
+ *	set to zero.
+ */
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+		 const efx_dword_t *inbuf, size_t inlen,
+		 efx_dword_t *outbuf, size_t outlen,
+		 size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+				       outlen_actual, false);
+}
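+
+/* A typical synchronous caller looks like efx_mcdi_nvram_info() further
+ * down this file: declare buffers with MCDI_DECLARE_BUF(), fill request
+ * fields with MCDI_SET_DWORD(), call efx_mcdi_rpc(), check the returned
+ * length, and read results out with MCDI_DWORD().
+ */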
+
+/* Normally, on receiving an error code in the MCDI response,
+ * efx_mcdi_rpc will log an error message containing (among other
+ * things) the raw error code, by means of efx_mcdi_display_error.
+ * This _quiet version suppresses that; if the caller wishes to log
+ * the error conditionally on the return code, it should call this
+ * function and is then responsible for calling efx_mcdi_display_error
+ * as needed.
+ */
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen,
+		       efx_dword_t *outbuf, size_t outlen,
+		       size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc_evb_retry(efx, cmd, inbuf, inlen, outbuf, outlen,
+				       outlen_actual, true);
+}
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	int rc;
+
+	rc = efx_mcdi_check_supported(efx, cmd, inlen);
+	if (rc)
+		return rc;
+
+	if (efx->mc_bist_for_other_fn)
+		return -ENETDOWN;
+
+	if (mcdi->mode == MCDI_MODE_FAIL)
+		return -ENETDOWN;
+
+	efx_mcdi_acquire_sync(mcdi);
+	efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+	return 0;
+}
+
+static int _efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+			       const efx_dword_t *inbuf, size_t inlen,
+			       size_t outlen,
+			       efx_mcdi_async_completer *complete,
+			       unsigned long cookie, bool quiet)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+	struct efx_mcdi_async_param *async;
+	int rc;
+
+	rc = efx_mcdi_check_supported(efx, cmd, inlen);
+	if (rc)
+		return rc;
+
+	if (efx->mc_bist_for_other_fn)
+		return -ENETDOWN;
+
+	async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+			GFP_ATOMIC);
+	if (!async)
+		return -ENOMEM;
+
+	async->cmd = cmd;
+	async->inlen = inlen;
+	async->outlen = outlen;
+	async->quiet = quiet;
+	async->complete = complete;
+	async->cookie = cookie;
+	memcpy(async + 1, inbuf, inlen);
+
+	spin_lock_bh(&mcdi->async_lock);
+
+	if (mcdi->mode == MCDI_MODE_EVENTS) {
+		list_add_tail(&async->list, &mcdi->async_list);
+
+		/* If this is at the front of the queue, try to start it
+		 * immediately
+		 */
+		if (mcdi->async_list.next == &async->list &&
+		    efx_mcdi_acquire_async(mcdi)) {
+			efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+			mod_timer(&mcdi->async_timer,
+				  jiffies + MCDI_RPC_TIMEOUT);
+		}
+	} else {
+		kfree(async);
+		rc = -ENETDOWN;
+	}
+
+	spin_unlock_bh(&mcdi->async_lock);
+
+	return rc;
+}
+
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context.  It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+		   const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+		   efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+				   cookie, false);
+}
+
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+			     const efx_dword_t *inbuf, size_t inlen,
+			     size_t outlen, efx_mcdi_async_completer *complete,
+			     unsigned long cookie)
+{
+	return _efx_mcdi_rpc_async(efx, cmd, inbuf, inlen, outlen, complete,
+				   cookie, true);
+}
+
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+			efx_dword_t *outbuf, size_t outlen,
+			size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				    outlen_actual, false, NULL, NULL);
+}
+
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd, size_t inlen,
+			      efx_dword_t *outbuf, size_t outlen,
+			      size_t *outlen_actual)
+{
+	return _efx_mcdi_rpc_finish(efx, cmd, inlen, outbuf, outlen,
+				    outlen_actual, true, NULL, NULL);
+}
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+			    size_t inlen, efx_dword_t *outbuf,
+			    size_t outlen, int rc)
+{
+	int code = 0, err_arg = 0;
+
+	if (outlen >= MC_CMD_ERR_CODE_OFST + 4)
+		code = MCDI_DWORD(outbuf, ERR_CODE);
+	if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
+		err_arg = MCDI_DWORD(outbuf, ERR_ARG);
+	netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+		       "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+		       cmd, inlen, rc, code, err_arg);
+}
+
+/* Switch to polled MCDI completions.  This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
+void efx_mcdi_mode_poll(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (!efx->mcdi)
+		return;
+
+	mcdi = efx_mcdi(efx);
+	/* If already in polling mode, nothing to do.
+	 * If in fail-fast state, don't switch to polled completion.
+	 * FLR recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_POLL || mcdi->mode == MCDI_MODE_FAIL)
+		return;
+
+	/* We can switch from event completion to polled completion, because
+	 * MCDI requests are always completed in shared memory. We do this
+	 * by switching the mode to MCDI_MODE_POLL and then completing the
+	 * request.
+	 * efx_mcdi_await_completion() will then call efx_mcdi_poll().
+	 *
+	 * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
+	 * which efx_mcdi_complete_sync() provides for us.
+	 */
+	mcdi->mode = MCDI_MODE_POLL;
+
+	efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+	struct efx_mcdi_async_param *async, *next;
+	struct efx_mcdi_iface *mcdi;
+
+	if (!efx->mcdi)
+		return;
+
+	mcdi = efx_mcdi(efx);
+
+	/* We must be in poll or fail mode so no more requests can be queued */
+	BUG_ON(mcdi->mode == MCDI_MODE_EVENTS);
+
+	del_timer_sync(&mcdi->async_timer);
+
+	/* If a request is still running, make sure we give the MC
+	 * time to complete it so that the response won't overwrite our
+	 * next request.
+	 */
+	if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+		efx_mcdi_poll(efx);
+		mcdi->state = MCDI_STATE_QUIESCENT;
+	}
+
+	/* Nothing else will access the async list now, so it is safe
+	 * to walk it without holding async_lock.  If we hold it while
+	 * calling a completer then lockdep may warn that we have
+	 * acquired locks in the wrong order.
+	 */
+	list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+		if (async->complete)
+			async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+		list_del(&async->list);
+		kfree(async);
+	}
+}
+
+void efx_mcdi_mode_event(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi;
+
+	if (!efx->mcdi)
+		return;
+
+	mcdi = efx_mcdi(efx);
+	/* If already in event completion mode, nothing to do.
+	 * If in fail-fast state, don't switch to event completion.  FLR
+	 * recovery will do that later.
+	 */
+	if (mcdi->mode == MCDI_MODE_EVENTS || mcdi->mode == MCDI_MODE_FAIL)
+		return;
+
+	/* We can't switch from polled to event completion in the middle of a
+	 * request, because the completion method is specified in the request.
+	 * So acquire the interface to serialise the requestors. We don't need
+	 * to acquire the iface_lock to change the mode here, but we do need a
+	 * write memory barrier to ensure that efx_mcdi_rpc() sees it, which
+	 * efx_mcdi_acquire_sync() provides.
+	 */
+	efx_mcdi_acquire_sync(mcdi);
+	mcdi->mode = MCDI_MODE_EVENTS;
+	efx_mcdi_release(mcdi);
+}
+
+static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	/* If there is an outstanding MCDI request, it has been terminated
+	 * either by a BADASSERT or REBOOT event. If the mcdi interface is
+	 * in polled mode, then do nothing because the MC reboot handler will
+	 * set the header correctly. However, if the mcdi interface is waiting
+	 * for a CMDDONE event, it won't receive it [and since all MCDI events
+	 * are sent to the same queue, we can't be racing with
+	 * efx_mcdi_ev_cpl()]
+	 *
+	 * If there is an outstanding asynchronous request, we can't
+	 * complete it now (efx_mcdi_complete() would deadlock).  The
+	 * reset process will take care of this.
+	 *
+	 * There's a race here with efx_mcdi_send_request(), because
+	 * we might receive a REBOOT event *before* the request has
+	 * been copied out. In polled mode (during startup) this is
+	 * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+	 * event mode, this condition is just an edge-case of
+	 * receiving a REBOOT event after posting the MCDI
+	 * request. Did the MC reboot before or after the copyout? The
+	 * best we can do in either case is simply to return failure.
+	 *
+	 * If there is an outstanding proxy response expected it is not going
+	 * to arrive. We should thus abort it.
+	 */
+	spin_lock(&mcdi->iface_lock);
+	efx_mcdi_proxy_abort(mcdi);
+
+	if (efx_mcdi_complete_sync(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = rc;
+			mcdi->resp_hdr_len = 0;
+			mcdi->resp_data_len = 0;
+			++mcdi->credits;
+		}
+	} else {
+		int count;
+
+		/* Consume the status word since efx_mcdi_rpc_finish() won't */
+		for (count = 0; count < MCDI_STATUS_DELAY_COUNT; ++count) {
+			rc = efx_mcdi_poll_reboot(efx);
+			if (rc)
+				break;
+			udelay(MCDI_STATUS_DELAY_US);
+		}
+
+		/* On EF10, a CODE_MC_REBOOT event can be received without the
+		 * reboot detection in efx_mcdi_poll_reboot() being triggered.
+		 * If zero was returned from the final call to
+		 * efx_mcdi_poll_reboot(), the MC reboot wasn't noticed but the
+		 * MC has definitely rebooted so prepare for the reset.
+		 */
+		if (!rc && efx->type->mcdi_reboot_detected)
+			efx->type->mcdi_reboot_detected(efx);
+
+		mcdi->new_epoch = true;
+
+		/* Nobody was waiting for an MCDI request, so trigger a reset */
+		efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
+	}
+
+	spin_unlock(&mcdi->iface_lock);
+}
+
+/* The MC is going down into BIST mode. Set the BIST flag to block
+ * new MCDI, cancel any outstanding MCDI and schedule a BIST-type reset
+ * (which doesn't actually execute a reset, it waits for the controlling
+ * function to reset it).
+ */
+static void efx_mcdi_ev_bist(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	spin_lock(&mcdi->iface_lock);
+	efx->mc_bist_for_other_fn = true;
+	efx_mcdi_proxy_abort(mcdi);
+
+	if (efx_mcdi_complete_sync(mcdi)) {
+		if (mcdi->mode == MCDI_MODE_EVENTS) {
+			mcdi->resprc = -EIO;
+			mcdi->resp_hdr_len = 0;
+			mcdi->resp_data_len = 0;
+			++mcdi->credits;
+		}
+	}
+	mcdi->new_epoch = true;
+	efx_schedule_reset(efx, RESET_TYPE_MC_BIST);
+	spin_unlock(&mcdi->iface_lock);
+}
+
+/* MCDI timeouts seen, so make all MCDI calls fail-fast and issue an FLR to try
+ * to recover.
+ */
+static void efx_mcdi_abandon(struct efx_nic *efx)
+{
+	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+	if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
+		return; /* it had already been done */
+	netif_dbg(efx, hw, efx->net_dev, "MCDI is timing out; trying to recover\n");
+	efx_schedule_reset(efx, RESET_TYPE_MCDI_TIMEOUT);
+}
+
+static void efx_handle_drain_event(struct efx_nic *efx)
+{
+	if (atomic_dec_and_test(&efx->active_queues))
+		wake_up(&efx->flush_wq);
+
+	WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+/* Called from efx_farch_ev_process and efx_ef10_ev_process for MCDI events */
+void efx_mcdi_process_event(struct efx_channel *channel,
+			    efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE);
+	u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA);
+
+	switch (code) {
+	case MCDI_EVENT_CODE_BADSSERT:
+		netif_err(efx, hw, efx->net_dev,
+			  "MC watchdog or assertion failure at 0x%x\n", data);
+		efx_mcdi_ev_death(efx, -EINTR);
+		break;
+
+	case MCDI_EVENT_CODE_PMNOTICE:
+		netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n");
+		break;
+
+	case MCDI_EVENT_CODE_CMDDONE:
+		efx_mcdi_ev_cpl(efx,
+				MCDI_EVENT_FIELD(*event, CMDDONE_SEQ),
+				MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN),
+				MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO));
+		break;
+
+	case MCDI_EVENT_CODE_LINKCHANGE:
+		efx_mcdi_process_link_change(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SENSOREVT:
+		efx_sensor_event(efx, event);
+		break;
+	case MCDI_EVENT_CODE_SCHEDERR:
+		netif_dbg(efx, hw, efx->net_dev,
+			  "MC Scheduler alert (0x%x)\n", data);
+		break;
+	case MCDI_EVENT_CODE_REBOOT:
+	case MCDI_EVENT_CODE_MC_REBOOT:
+		netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
+		efx_mcdi_ev_death(efx, -EIO);
+		break;
+	case MCDI_EVENT_CODE_MC_BIST:
+		netif_info(efx, hw, efx->net_dev, "MC entered BIST mode\n");
+		efx_mcdi_ev_bist(efx);
+		break;
+	case MCDI_EVENT_CODE_MAC_STATS_DMA:
+		/* MAC stats are gathered lazily.  We can ignore this. */
+		break;
+	case MCDI_EVENT_CODE_FLR:
+		if (efx->type->sriov_flr)
+			efx->type->sriov_flr(efx,
+					     MCDI_EVENT_FIELD(*event, FLR_VF));
+		break;
+	case MCDI_EVENT_CODE_PTP_RX:
+	case MCDI_EVENT_CODE_PTP_FAULT:
+	case MCDI_EVENT_CODE_PTP_PPS:
+		efx_ptp_event(efx, event);
+		break;
+	case MCDI_EVENT_CODE_PTP_TIME:
+		efx_time_sync_event(channel, event);
+		break;
+	case MCDI_EVENT_CODE_TX_FLUSH:
+	case MCDI_EVENT_CODE_RX_FLUSH:
+		/* Two flush events will be sent: one to the same event
+		 * queue as completions, and one to event queue 0.
+		 * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+		 * flag will be set, and we should ignore the event
+		 * because we want to wait for all completions.
+		 */
+		BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+			     MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+		if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+			efx_handle_drain_event(efx);
+		break;
+	case MCDI_EVENT_CODE_TX_ERR:
+	case MCDI_EVENT_CODE_RX_ERR:
+		netif_err(efx, hw, efx->net_dev,
+			  "%s DMA error (event: "EFX_QWORD_FMT")\n",
+			  code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+			  EFX_QWORD_VAL(*event));
+		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+		break;
+	case MCDI_EVENT_CODE_PROXY_RESPONSE:
+		efx_mcdi_ev_proxy_response(efx,
+				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_HANDLE),
+				MCDI_EVENT_FIELD(*event, PROXY_RESPONSE_RC));
+		break;
+	default:
+		netif_err(efx, hw, efx->net_dev,
+			  "Unknown MCDI event " EFX_QWORD_FMT "\n",
+			  EFX_QWORD_VAL(*event));
+	}
+}
+
+/**************************************************************************
+ *
+ * Specific request functions
+ *
+ **************************************************************************
+ */
+
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_OUT_LEN);
+	size_t outlength;
+	const __le16 *ver_words;
+	size_t offset;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		goto fail;
+	if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
+	offset = scnprintf(buf, len, "%u.%u.%u.%u",
+			   le16_to_cpu(ver_words[0]),
+			   le16_to_cpu(ver_words[1]),
+			   le16_to_cpu(ver_words[2]),
+			   le16_to_cpu(ver_words[3]));
+
+	if (efx->type->print_additional_fwver)
+		offset += efx->type->print_additional_fwver(efx, buf + offset,
+							    len - offset);
+
+	/* It's theoretically possible for the string to exceed 31
+	 * characters, though in practice the first three version
+	 * components are short enough that this doesn't happen.
+	 */
+	if (WARN_ON(offset >= len))
+		buf[0] = 0;
+
+	return;
+
+fail:
+	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	buf[0] = 0;
+}
+
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+			       bool *was_attached)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
+		       driver_operating ? 1 : 0);
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &outlen);
+	/* If we're not the primary PF, trying to ATTACH with a FIRMWARE_ID
+	 * specified will fail with EPERM, and we have to tell the MC we don't
+	 * care what firmware we get.
+	 */
+	if (rc == -EPERM) {
+		netif_dbg(efx, probe, efx->net_dev,
+			  "efx_mcdi_drv_attach with fw-variant setting failed EPERM, trying without it\n");
+		MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID,
+			       MC_CMD_FW_DONT_CARE);
+		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_DRV_ATTACH, inbuf,
+					sizeof(inbuf), outbuf, sizeof(outbuf),
+					&outlen);
+	}
+	if (rc) {
+		efx_mcdi_display_error(efx, MC_CMD_DRV_ATTACH, sizeof(inbuf),
+				       outbuf, outlen, rc);
+		goto fail;
+	}
+	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	if (driver_operating) {
+		if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN) {
+			efx->mcdi->fn_flags =
+				MCDI_DWORD(outbuf,
+					   DRV_ATTACH_EXT_OUT_FUNC_FLAGS);
+		} else {
+			/* Synthesise flags for Siena */
+			efx->mcdi->fn_flags =
+				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL |
+				1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED |
+				(efx_port_num(efx) == 0) <<
+				MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY;
+		}
+	}
+
+	/* We currently assume we have control of the external link
+	 * and are completely trusted by firmware.  Abort probing
+	 * if that's not true for this function.
+	 */
+
+	if (was_attached != NULL)
+		*was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
+	return 0;
+
+fail:
+	netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+			   u16 *fw_subtype_list, u32 *capabilities)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+	size_t outlen, i;
+	int port_num = efx_port_num(efx);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+	/* we need __aligned(2) for ether_addr_copy */
+	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST & 1);
+	BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST & 1);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	if (mac_address)
+		ether_addr_copy(mac_address,
+				port_num ?
+				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+				MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0));
+	if (fw_subtype_list) {
+		for (i = 0;
+		     i < MCDI_VAR_ARRAY_LEN(outlen,
+					    GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+		     i++)
+			fw_subtype_list[i] = MCDI_ARRAY_WORD(
+				outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+		for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+			fw_subtype_list[i] = 0;
+	}
+	if (capabilities) {
+		if (port_num)
+			*capabilities = MCDI_DWORD(outbuf,
+					GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
+		else
+			*capabilities = MCDI_DWORD(outbuf,
+					GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
+	}
+
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n",
+		  __func__, rc, (int)outlen);
+
+	return rc;
+}
+
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
+	u32 dest = 0;
+	int rc;
+
+	if (uart)
+		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART;
+	if (evq)
+		dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ;
+
+	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest);
+	MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq);
+
+	BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	return rc;
+}
+
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+	if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	*nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+		  __func__, rc);
+	return rc;
+}
+
+/* This function finds types using the new NVRAM_PARTITIONS mcdi. */
+static int efx_new_mcdi_nvram_types(struct efx_nic *efx, u32 *number,
+				    u32 *nvram_types)
+{
+	efx_dword_t *outbuf = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+				      GFP_KERNEL);
+	size_t outlen;
+	int rc;
+
+	if (!outbuf)
+		return -ENOMEM;
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+			  outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2, &outlen);
+	if (rc)
+		goto fail;
+
+	*number = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+
+	memcpy(nvram_types, MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID),
+	       *number * sizeof(u32));
+
+fail:
+	kfree(outbuf);
+	return rc;
+}
+
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+			size_t *size_out, size_t *erase_size_out,
+			bool *protected_out)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
+	*erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
+	*protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) &
+				(1 << MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN));
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		return rc;
+
+	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+	case MC_CMD_NVRAM_TEST_PASS:
+	case MC_CMD_NVRAM_TEST_NOTSUPP:
+		return 0;
+	default:
+		return -EIO;
+	}
+}
+
+/* This function tests nvram partitions using the new mcdi partition lookup scheme */
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+	u32 *nvram_types = kzalloc(MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2,
+				   GFP_KERNEL);
+	unsigned int number;
+	int rc, i;
+
+	if (!nvram_types)
+		return -ENOMEM;
+
+	rc = efx_new_mcdi_nvram_types(efx, &number, nvram_types);
+	if (rc)
+		goto fail;
+
+	/* Require at least one check */
+	rc = -EAGAIN;
+
+	for (i = 0; i < number; i++) {
+		if (nvram_types[i] == NVRAM_PARTITION_TYPE_PARTITION_MAP ||
+		    nvram_types[i] == NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG)
+			continue;
+
+		rc = efx_mcdi_nvram_test(efx, nvram_types[i]);
+		if (rc)
+			goto fail;
+	}
+
+fail:
+	kfree(nvram_types);
+	return rc;
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+	u32 nvram_types;
+	unsigned int type;
+	int rc;
+
+	rc = efx_mcdi_nvram_types(efx, &nvram_types);
+	if (rc)
+		goto fail1;
+
+	type = 0;
+	while (nvram_types != 0) {
+		if (nvram_types & 1) {
+			rc = efx_mcdi_nvram_test(efx, type);
+			if (rc)
+				goto fail2;
+		}
+		type++;
+		nvram_types >>= 1;
+	}
+
+	return 0;
+
+fail2:
+	netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n",
+		  __func__, type);
+fail1:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+/* Returns 1 if an assertion was read, 0 if no assertion had fired,
+ * negative on error.
+ */
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+	unsigned int flags, index;
+	const char *reason;
+	size_t outlen;
+	int retry;
+	int rc;
+
+	/* Attempt to read any stored assertion state before we reboot
+	 * the mcfw out of the assertion handler. Retry twice, once
+	 * because a boot-time assertion might cause this command to fail
+	 * with EINTR. And once again because GET_ASSERTS can race with
+	 * MC_CMD_REBOOT running on the other port. */
+	retry = 2;
+	do {
+		MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
+		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_ASSERTS,
+					inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+					outbuf, sizeof(outbuf), &outlen);
+		if (rc == -EPERM)
+			return 0;
+	} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
+
+	if (rc) {
+		efx_mcdi_display_error(efx, MC_CMD_GET_ASSERTS,
+				       MC_CMD_GET_ASSERTS_IN_LEN, outbuf,
+				       outlen, rc);
+		return rc;
+	}
+	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
+		return -EIO;
+
+	/* Print out any recorded assertion state */
+	flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
+		return 0;
+
+	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
+		? "system-level assertion"
+		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
+		? "thread-level assertion"
+		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
+		? "watchdog reset"
+		: "unknown assertion";
+	netif_err(efx, hw, efx->net_dev,
+		  "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
+		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+		  MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
+
+	/* Print out the registers */
+	for (index = 0;
+	     index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+	     index++)
+		netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+			  1 + index,
+			  MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+					   index));
+
+	return 1;
+}
+
+static int efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+	int rc;
+
+	/* If the MC is running debug firmware, it might now be
+	 * waiting for a debugger to attach, but we just want it to
+	 * reboot.  We set a flag that makes the command a no-op if it
+	 * has already done so.
+	 * The MCDI will thus return either 0 or -EIO.
+	 */
+	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+		       MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+				NULL, 0, NULL);
+	if (rc == -EIO)
+		rc = 0;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_REBOOT, MC_CMD_REBOOT_IN_LEN,
+				       NULL, 0, rc);
+	return rc;
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_mcdi_read_assertion(efx);
+	if (rc <= 0)
+		return rc;
+
+	return efx_mcdi_exit_assertion(efx);
+}
+
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
+
+	BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
+	BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
+	BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT);
+
+	BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
+
+	return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_mcdi_reset_func(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_ENTITY_RESET_IN_LEN);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_ENTITY_RESET_OUT_LEN != 0);
+	MCDI_POPULATE_DWORD_1(inbuf, ENTITY_RESET_IN_FLAG,
+			      ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1);
+	rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	return rc;
+}
+
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	/* White is black, and up is down */
+	if (rc == -EIO)
+		return 0;
+	if (rc == 0)
+		rc = -EIO;
+	return rc;
+}
+
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+	return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+	int rc;
+
+	/* If MCDI is down, we can't handle_assertion */
+	if (method == RESET_TYPE_MCDI_TIMEOUT) {
+		rc = pci_reset_function(efx->pci_dev);
+		if (rc)
+			return rc;
+		/* Re-enable polled MCDI completion */
+		if (efx->mcdi) {
+			struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+			mcdi->mode = MCDI_MODE_POLL;
+		}
+		return 0;
+	}
+
+	/* Recover from a failed assertion pre-reset */
+	rc = efx_mcdi_handle_assertion(efx);
+	if (rc)
+		return rc;
+
+	if (method == RESET_TYPE_DATAPATH)
+		return 0;
+	else if (method == RESET_TYPE_WORLD)
+		return efx_mcdi_reset_mc(efx);
+	else
+		return efx_mcdi_reset_func(efx);
+}
+
+static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
+				   const u8 *mac, int *id_out)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type);
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE,
+		       MC_CMD_FILTER_MODE_SIMPLE);
+	ether_addr_copy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID);
+
+	return 0;
+
+fail:
+	*id_out = -1;
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int
+efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,  const u8 *mac, int *id_out)
+{
+	return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out);
+}
+
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	*id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID);
+
+	return 0;
+
+fail:
+	*id_out = -1;
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	return rc;
+}
+
+int efx_mcdi_flush_rxqs(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	struct efx_rx_queue *rx_queue;
+	MCDI_DECLARE_BUF(inbuf,
+			 MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
+	int rc, count;
+
+	BUILD_BUG_ON(EFX_MAX_CHANNELS >
+		     MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
+
+	count = 0;
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_rx_queue(rx_queue, channel) {
+			if (rx_queue->flush_pending) {
+				rx_queue->flush_pending = false;
+				atomic_dec(&efx->rxq_flush_pending);
+				MCDI_SET_ARRAY_DWORD(
+					inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+					count, efx_rx_queue_index(rx_queue));
+				count++;
+			}
+		}
+	}
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+			  MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
+	WARN_ON(rc < 0);
+
+	return rc;
+}
+
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx)
+{
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL);
+	return rc;
+}
+
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (!flags)
+		return 0;
+
+	if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+		*flags = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+	else
+		*flags = 0;
+
+	return 0;
+}
+
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+			     unsigned int *enabled_out)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_WORKAROUNDS_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	if (impl_out)
+		*impl_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+
+	if (enabled_out)
+		*enabled_out = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+
+	return 0;
+
+fail:
+	/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
+	 * terrifying.  The call site will have to deal with it though.
+	 */
+	netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
+		       "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
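
Callers treat both outputs as bitmasks of MC_CMD_GET_WORKAROUNDS_OUT_* flags. A hedged sketch of a typical check; BUG35388 is used purely as an example flag and the helper is not part of this series:

/* Illustrative only: report whether a given firmware workaround is both
 * implemented and currently enabled.
 */
static bool example_workaround_enabled(struct efx_nic *efx)
{
	unsigned int implemented, enabled;

	if (efx_mcdi_get_workarounds(efx, &implemented, &enabled))
		return false;

	return (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388) &&
	       (enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
}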
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_START_V2_IN_FLAGS,
+			      NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+			      1);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+
+	return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+			       loff_t offset, u8 *buffer, size_t length)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_V2_LEN);
+	MCDI_DECLARE_BUF(outbuf,
+			 MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+	MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_V2_MODE,
+		       MC_CMD_NVRAM_READ_IN_V2_DEFAULT);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+	return 0;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+				loff_t offset, const u8 *buffer, size_t length)
+{
+	MCDI_DECLARE_BUF(inbuf,
+			 MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+	memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+			  ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+			  NULL, 0, NULL);
+	return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+				loff_t offset, size_t length)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+	MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+	BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+	return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN);
+	size_t outlen;
+	int rc, rc2;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+	/* Always set this flag. Old firmware ignores it */
+	MCDI_POPULATE_DWORD_1(inbuf, NVRAM_UPDATE_FINISH_V2_IN_FLAGS,
+			      NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT,
+			      1);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (!rc && outlen >= MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN) {
+		rc2 = MCDI_DWORD(outbuf, NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE);
+		if (rc2 != MC_CMD_NVRAM_VERIFY_RC_SUCCESS)
+			netif_err(efx, drv, efx->net_dev,
+				  "NVRAM update failed verification with code 0x%x\n",
+				  rc2);
+		switch (rc2) {
+		case MC_CMD_NVRAM_VERIFY_RC_SUCCESS:
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED:
+		case MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED:
+			rc = -EIO;
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT:
+		case MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST:
+			rc = -EINVAL;
+			break;
+		case MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES:
+		case MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS:
+		case MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH:
+			rc = -EPERM;
+			break;
+		default:
+			netif_err(efx, drv, efx->net_dev,
+				  "Unknown response to NVRAM_UPDATE_FINISH\n");
+			rc = -EIO;
+		}
+	}
+
+	return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+		      size_t len, size_t *retlen, u8 *buffer)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	while (offset < end) {
+		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+		rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+					 buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
+	}
+out:
+	*retlen = offset - start;
+	return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk = part->common.mtd.erasesize;
+	int rc = 0;
+
+	if (!part->updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+		if (rc)
+			goto out;
+		part->updating = true;
+	}
+
+	/* The MCDI interface can in fact do multiple erase blocks at once;
+	 * but erasing may be slow, so we make multiple calls here to avoid
+	 * tripping the MCDI RPC timeout. */
+	while (offset < end) {
+		rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+					  chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+	}
+out:
+	return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+		       size_t len, size_t *retlen, const u8 *buffer)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	loff_t offset = start;
+	loff_t end = min_t(loff_t, start + len, mtd->size);
+	size_t chunk;
+	int rc = 0;
+
+	if (!part->updating) {
+		rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+		if (rc)
+			goto out;
+		part->updating = true;
+	}
+
+	while (offset < end) {
+		chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+		rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+					  buffer, chunk);
+		if (rc)
+			goto out;
+		offset += chunk;
+		buffer += chunk;
+	}
+out:
+	*retlen = offset - start;
+	return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	int rc = 0;
+
+	if (part->updating) {
+		part->updating = false;
+		rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+	}
+
+	return rc;
+}
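
Taken together, a full partition rewrite through the MTD layer boils down to: erase (which opens the NVRAM update transaction), write in EFX_MCDI_NVRAM_LEN_MAX-sized chunks, then sync to close and verify the update. A condensed sketch; the example_* helper and its buffer are assumed, not taken from this series:

/* Illustrative only: the sequence the MTD core effectively drives when
 * rewriting a partition from a caller-supplied buffer.
 */
static int example_rewrite_partition(struct mtd_info *mtd,
				     const u8 *buf, size_t len)
{
	size_t retlen;
	int rc;

	rc = efx_mcdi_mtd_erase(mtd, 0, len);	/* implies NVRAM_UPDATE_START */
	if (rc)
		return rc;

	rc = efx_mcdi_mtd_write(mtd, 0, len, &retlen, buf);
	if (rc)
		return rc;

	return efx_mcdi_mtd_sync(mtd);		/* NVRAM_UPDATE_FINISH + verify */
}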
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+	struct efx_mcdi_mtd_partition *mcdi_part =
+		container_of(part, struct efx_mcdi_mtd_partition, common);
+	struct efx_nic *efx = part->mtd.priv;
+
+	snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+		 efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi.h b/drivers/net/ethernet/sfc/siena/mcdi.h
new file mode 100644
index 000000000000..69c2924a147c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_MCDI_H
+#define EFX_MCDI_H
+
+/**
+ * enum efx_mcdi_state - MCDI request handling state
+ * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
+ *	mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ *	Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
+ * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
+ *	indicates we must wait for a proxy try again message.
+ * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
+ *	has not yet consumed the result. For all other threads, equivalent to
+ *	%MCDI_STATE_RUNNING.
+ */
+enum efx_mcdi_state {
+	MCDI_STATE_QUIESCENT,
+	MCDI_STATE_RUNNING_SYNC,
+	MCDI_STATE_RUNNING_ASYNC,
+	MCDI_STATE_PROXY_WAIT,
+	MCDI_STATE_COMPLETED,
+};
+
+/**
+ * enum efx_mcdi_mode - MCDI transaction mode
+ * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
+ * @MCDI_MODE_EVENTS: wait for an mcdi_event.  On timeout, poll once
+ * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
+ */
+enum efx_mcdi_mode {
+	MCDI_MODE_POLL,
+	MCDI_MODE_EVENTS,
+	MCDI_MODE_FAIL,
+};
+
+/**
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
+ * @mode: Poll for mcdi completion, or wait for an mcdi_event.
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
+ * @seqno: The next sequence number to use for mcdi requests.
+ * @credits: Number of spurious MCDI completion events allowed before we
+ *     trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
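+ * @resprc_raw: Response error/success code (raw MCDI numbering)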
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ *	enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
+ * @proxy_rx_handle: Most recently received proxy authorisation handle
+ * @proxy_rx_status: Status of most recent proxy authorisation
+ * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
+ */
+struct efx_mcdi_iface {
+	struct efx_nic *efx;
+	enum efx_mcdi_state state;
+	enum efx_mcdi_mode mode;
+	wait_queue_head_t wq;
+	spinlock_t iface_lock;
+	bool new_epoch;
+	unsigned int credits;
+	unsigned int seqno;
+	int resprc;
+	int resprc_raw;
+	size_t resp_hdr_len;
+	size_t resp_data_len;
+	spinlock_t async_lock;
+	struct list_head async_list;
+	struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+	char *logging_buffer;
+	bool logging_enabled;
+#endif
+	unsigned int proxy_rx_handle;
+	int proxy_rx_status;
+	wait_queue_head_t proxy_rx_wq;
+};
+
+struct efx_mcdi_mon {
+	struct efx_buffer dma_buf;
+	struct mutex update_lock;
+	unsigned long last_update;
+	struct device *device;
+	struct efx_mcdi_mon_attribute *attrs;
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	unsigned int n_attrs;
+};
+
+struct efx_mcdi_mtd_partition {
+	struct efx_mtd_partition common;
+	bool updating;
+	u16 nvram_type;
+	u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd)				\
+	container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
+ */
+struct efx_mcdi_data {
+	struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+	struct efx_mcdi_mon hwmon;
+#endif
+	u32 fn_flags;
+};
+
+static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
+{
+	EFX_WARN_ON_PARANOID(!efx->mcdi);
+	return &efx->mcdi->iface;
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+	EFX_WARN_ON_PARANOID(!efx->mcdi);
+	return &efx->mcdi->hwmon;
+}
+#endif
+
+int efx_mcdi_init(struct efx_nic *efx);
+void efx_mcdi_detach(struct efx_nic *efx);
+void efx_mcdi_fini(struct efx_nic *efx);
+
+int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
+		 size_t inlen, efx_dword_t *outbuf, size_t outlen,
+		 size_t *outlen_actual);
+int efx_mcdi_rpc_quiet(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen,
+		       efx_dword_t *outbuf, size_t outlen,
+		       size_t *outlen_actual);
+
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+		       const efx_dword_t *inbuf, size_t inlen);
+int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
+			efx_dword_t *outbuf, size_t outlen,
+			size_t *outlen_actual);
+int efx_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned cmd,
+			      size_t inlen, efx_dword_t *outbuf,
+			      size_t outlen, size_t *outlen_actual);
+
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+				      unsigned long cookie, int rc,
+				      efx_dword_t *outbuf,
+				      size_t outlen_actual);
+int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+		       const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+		       efx_mcdi_async_completer *complete,
+		       unsigned long cookie);
+int efx_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
+			     const efx_dword_t *inbuf, size_t inlen,
+			     size_t outlen,
+			     efx_mcdi_async_completer *complete,
+			     unsigned long cookie);
+
+void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
+			    size_t inlen, efx_dword_t *outbuf,
+			    size_t outlen, int rc);
+
+int efx_mcdi_poll_reboot(struct efx_nic *efx);
+void efx_mcdi_mode_poll(struct efx_nic *efx);
+void efx_mcdi_mode_event(struct efx_nic *efx);
+void efx_mcdi_flush_async(struct efx_nic *efx);
+
+void efx_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
+
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned.  Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
+#define _MCDI_DECLARE_BUF(_name, _len)					\
+	efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define MCDI_DECLARE_BUF(_name, _len)					\
+	_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
+#define MCDI_DECLARE_BUF_ERR(_name)					\
+	MCDI_DECLARE_BUF(_name, 8)
+#define _MCDI_PTR(_buf, _offset)					\
+	((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field)						\
+	_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align)				\
+	((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field)					\
+	((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_BYTE(_buf, _field)						\
+	((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1),	\
+	 *MCDI_PTR(_buf, _field))
+#define MCDI_WORD(_buf, _field)						\
+	((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +	\
+	 le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value)				\
+	EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field)					\
+	EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1)		\
+	EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1,		\
+			      _name2, _value2)				\
+	EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1,		\
+			      _name2, _value2, _name3, _value3)		\
+	EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2,		\
+			     MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1,		\
+			      _name2, _value2, _name3, _value3,		\
+			      _name4, _value4)				\
+	EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2,		\
+			     MC_CMD_ ## _name3, _value3,		\
+			     MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1,		\
+			      _name2, _value2, _name3, _value3,		\
+			      _name4, _value4, _name5, _value5)		\
+	EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2,		\
+			     MC_CMD_ ## _name3, _value3,		\
+			     MC_CMD_ ## _name4, _value4,		\
+			     MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1,		\
+			      _name2, _value2, _name3, _value3,		\
+			      _name4, _value4, _name5, _value5,		\
+			      _name6, _value6)				\
+	EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2,		\
+			     MC_CMD_ ## _name3, _value3,		\
+			     MC_CMD_ ## _name4, _value4,		\
+			     MC_CMD_ ## _name5, _value5,		\
+			     MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1,		\
+			      _name2, _value2, _name3, _value3,		\
+			      _name4, _value4, _name5, _value5,		\
+			      _name6, _value6, _name7, _value7)		\
+	EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field),		\
+			     MC_CMD_ ## _name1, _value1,		\
+			     MC_CMD_ ## _name2, _value2,		\
+			     MC_CMD_ ## _name3, _value3,		\
+			     MC_CMD_ ## _name4, _value4,		\
+			     MC_CMD_ ## _name5, _value5,		\
+			     MC_CMD_ ## _name6, _value6,		\
+			     MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value)				\
+	do {								\
+		EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0],	\
+				     EFX_DWORD_0, (u32)(_value));	\
+		EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1],	\
+				     EFX_DWORD_0, (u64)(_value) >> 32);	\
+	} while (0)
+#define MCDI_QWORD(_buf, _field)					\
+	(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) |	\
+	(u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field)					\
+	EFX_EXTRACT_DWORD(						\
+		*(efx_dword_t *)					\
+		_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+		MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f,	\
+		(MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) +	\
+		MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align)			\
+	(_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+	 + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name)					\
+	efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index)			\
+	((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field)				\
+	min_t(size_t, MC_CMD_ ## _field ## _MAXNUM,			\
+	      ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index)				\
+	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) +		\
+	 le16_to_cpu(*(__force const __le16 *)				\
+		     _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index)				\
+	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) +		\
+	 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value)		\
+	EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index),	\
+			    EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index)				\
+	EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index)				\
+	(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) +		\
+	 (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value)		\
+	do {								\
+		EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+				    EFX_DWORD_0, (u32)(_value));	\
+		EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+				    EFX_DWORD_0, (u64)(_value) >> 32);	\
+	} while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2)		\
+	MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index),	\
+		   _type ## _TYPEDEF, _field2)
+
+#define MCDI_EVENT_FIELD(_ev, _field)			\
+	EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+
+#define MCDI_CAPABILITY(field)						\
+	MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
+
+#define MCDI_CAPABILITY_OFST(field) \
+	MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
+
+#define efx_has_cap(efx, field) \
+	efx->type->check_caps(efx, \
+			      MCDI_CAPABILITY(field), \
+			      MCDI_CAPABILITY_OFST(field))
+
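As a quick illustration of the buffer macros above: MCDI_DECLARE_BUF sizes and zero-initialises a dword array, MCDI_SET_DWORD fills request fields, and MCDI_DWORD extracts response fields. The sketch below is essentially a condensed efx_mcdi_nvram_info() from mcdi.c, shown only to demonstrate the macro usage:

/* Illustrative only: marshal a simple request/response pair. */
static int example_nvram_size(struct efx_nic *efx, unsigned int type,
			      size_t *size_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);
	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
		return -EIO;

	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	return 0;
}
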
+void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
+			   u16 *fw_subtype_list, u32 *capabilities);
+int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq);
+int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
+int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
+			size_t *size_out, size_t *erase_size_out,
+			bool *protected_out);
+int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_handle_assertion(struct efx_nic *efx);
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
+				  int *id_out);
+int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
+int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
+int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
+int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx);
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled,
+			    unsigned int *flags);
+int efx_mcdi_get_workarounds(struct efx_nic *efx, unsigned int *impl_out,
+			     unsigned int *enabled_out);
+
+#ifdef CONFIG_SFC_MCDI_MON
+int efx_mcdi_mon_probe(struct efx_nic *efx);
+void efx_mcdi_mon_remove(struct efx_nic *efx);
+#else
+static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
+static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
+#endif
+
+#ifdef CONFIG_SFC_MTD
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
+		      size_t *retlen, u8 *buffer);
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
+		       size_t *retlen, const u8 *buffer);
+int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
+#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_mon.c b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
new file mode 100644
index 000000000000..5954fcfee2b1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_mon.c
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/hwmon.h>
+#include <linux/stat.h>
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+
+enum efx_hwmon_type {
+	EFX_HWMON_UNKNOWN,
+	EFX_HWMON_TEMP,         /* temperature */
+	EFX_HWMON_COOL,         /* cooling device, probably a heatsink */
+	EFX_HWMON_IN,		/* voltage */
+	EFX_HWMON_CURR,		/* current */
+	EFX_HWMON_POWER,	/* power */
+	EFX_HWMON_TYPES_COUNT
+};
+
+static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
+	[EFX_HWMON_TEMP]  = " degC",
+	[EFX_HWMON_COOL]  = " rpm", /* though nonsense for a heatsink */
+	[EFX_HWMON_IN]    = " mV",
+	[EFX_HWMON_CURR]  = " mA",
+	[EFX_HWMON_POWER] = " W",
+};
+
+static const struct {
+	const char *label;
+	enum efx_hwmon_type hwmon_type;
+	int port;
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port)				\
+	[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+	SENSOR(CONTROLLER_TEMP,		"Controller board temp.",   TEMP,  -1),
+	SENSOR(PHY_COMMON_TEMP,		"PHY temp.",		    TEMP,  -1),
+	SENSOR(CONTROLLER_COOLING,	"Controller heat sink",	    COOL,  -1),
+	SENSOR(PHY0_TEMP,		"PHY temp.",		    TEMP,  0),
+	SENSOR(PHY0_COOLING,		"PHY heat sink",	    COOL,  0),
+	SENSOR(PHY1_TEMP,		"PHY temp.",		    TEMP,  1),
+	SENSOR(PHY1_COOLING,		"PHY heat sink",	    COOL,  1),
+	SENSOR(IN_1V0,			"1.0V supply",		    IN,    -1),
+	SENSOR(IN_1V2,			"1.2V supply",		    IN,    -1),
+	SENSOR(IN_1V8,			"1.8V supply",		    IN,    -1),
+	SENSOR(IN_2V5,			"2.5V supply",		    IN,    -1),
+	SENSOR(IN_3V3,			"3.3V supply",		    IN,    -1),
+	SENSOR(IN_12V0,			"12.0V supply",		    IN,    -1),
+	SENSOR(IN_1V2A,			"1.2V analogue supply",	    IN,    -1),
+	SENSOR(IN_VREF,			"Ref. voltage",		    IN,    -1),
+	SENSOR(OUT_VAOE,		"AOE FPGA supply",	    IN,    -1),
+	SENSOR(AOE_TEMP,		"AOE FPGA temp.",	    TEMP,  -1),
+	SENSOR(PSU_AOE_TEMP,		"AOE regulator temp.",	    TEMP,  -1),
+	SENSOR(PSU_TEMP,		"Controller regulator temp.",
+								    TEMP,  -1),
+	SENSOR(FAN_0,			"Fan 0",		    COOL,  -1),
+	SENSOR(FAN_1,			"Fan 1",		    COOL,  -1),
+	SENSOR(FAN_2,			"Fan 2",		    COOL,  -1),
+	SENSOR(FAN_3,			"Fan 3",		    COOL,  -1),
+	SENSOR(FAN_4,			"Fan 4",		    COOL,  -1),
+	SENSOR(IN_VAOE,			"AOE input supply",	    IN,    -1),
+	SENSOR(OUT_IAOE,		"AOE output current",	    CURR,  -1),
+	SENSOR(IN_IAOE,			"AOE input current",	    CURR,  -1),
+	SENSOR(NIC_POWER,		"Board power use",	    POWER, -1),
+	SENSOR(IN_0V9,			"0.9V supply",		    IN,    -1),
+	SENSOR(IN_I0V9,			"0.9V supply current",	    CURR,  -1),
+	SENSOR(IN_I1V2,			"1.2V supply current",	    CURR,  -1),
+	SENSOR(IN_0V9_ADC,		"0.9V supply (ext. ADC)",   IN,    -1),
+	SENSOR(CONTROLLER_2_TEMP,	"Controller board temp. 2", TEMP,  -1),
+	SENSOR(VREG_INTERNAL_TEMP,	"Regulator die temp.",	    TEMP,  -1),
+	SENSOR(VREG_0V9_TEMP,		"0.9V regulator temp.",     TEMP,  -1),
+	SENSOR(VREG_1V2_TEMP,		"1.2V regulator temp.",     TEMP,  -1),
+	SENSOR(CONTROLLER_VPTAT,
+			      "Controller PTAT voltage (int. ADC)", IN,    -1),
+	SENSOR(CONTROLLER_INTERNAL_TEMP,
+				 "Controller die temp. (int. ADC)", TEMP,  -1),
+	SENSOR(CONTROLLER_VPTAT_EXTADC,
+			      "Controller PTAT voltage (ext. ADC)", IN,    -1),
+	SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+				 "Controller die temp. (ext. ADC)", TEMP,  -1),
+	SENSOR(AMBIENT_TEMP,		"Ambient temp.",	    TEMP,  -1),
+	SENSOR(AIRFLOW,			"Air flow raw",		    IN,    -1),
+	SENSOR(VDD08D_VSS08D_CSR,	"0.9V die (int. ADC)",	    IN,    -1),
+	SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)",	    IN,    -1),
+	SENSOR(HOTPOINT_TEMP,  "Controller board temp. (hotpoint)", TEMP,  -1),
+#undef SENSOR
+};
+
+static const char *const sensor_status_names[] = {
+	[MC_CMD_SENSOR_STATE_OK] = "OK",
+	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
+	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
+	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+	[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
+};
+
+void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	unsigned int type, state, value;
+	enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
+	const char *name = NULL, *state_txt, *unit;
+
+	type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
+	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
+	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);
+
+	/* Deal gracefully with the board having more sensors than we
+	 * know about, but do not expect new sensor states. */
+	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+		name = efx_mcdi_sensor_type[type].label;
+		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+	}
+	if (!name)
+		name = "No sensor name available";
+	EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
+	state_txt = sensor_status_names[state];
+	EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
+	unit = efx_hwmon_unit[hwmon_type];
+	if (!unit)
+		unit = "";
+
+	netif_err(efx, hw, efx->net_dev,
+		  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
+		  type, name, state_txt, value, unit);
+}
+
+#ifdef CONFIG_SFC_MCDI_MON
+
+struct efx_mcdi_mon_attribute {
+	struct device_attribute dev_attr;
+	unsigned int index;
+	unsigned int type;
+	enum efx_hwmon_type hwmon_type;
+	unsigned int limit_value;
+	char name[12];
+};
+
+static int efx_mcdi_mon_update(struct efx_nic *efx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
+	int rc;
+
+	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+		       hwmon->dma_buf.dma_addr);
+	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
+			  inbuf, sizeof(inbuf), NULL, 0, NULL);
+	if (rc == 0)
+		hwmon->last_update = jiffies;
+	return rc;
+}
+
+static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
+				  efx_dword_t *entry)
+{
+	struct efx_nic *efx = dev_get_drvdata(dev->parent);
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);
+
+	mutex_lock(&hwmon->update_lock);
+
+	/* Use cached value if last update was < 1 s ago */
+	if (time_before(jiffies, hwmon->last_update + HZ))
+		rc = 0;
+	else
+		rc = efx_mcdi_mon_update(efx);
+
+	/* Copy out the requested entry */
+	*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];
+
+	mutex_unlock(&hwmon->update_lock);
+
+	return rc;
+}
+
+static ssize_t efx_mcdi_mon_show_value(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct efx_mcdi_mon_attribute *mon_attr =
+		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+	efx_dword_t entry;
+	unsigned int value, state;
+	int rc;
+
+	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+	if (rc)
+		return rc;
+
+	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+	if (state == MC_CMD_SENSOR_STATE_NO_READING)
+		return -EBUSY;
+
+	value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
+
+	switch (mon_attr->hwmon_type) {
+	case EFX_HWMON_TEMP:
+		/* Convert temperature from degrees to milli-degrees Celsius */
+		value *= 1000;
+		break;
+	case EFX_HWMON_POWER:
+		/* Convert power from watts to microwatts */
+		value *= 1000000;
+		break;
+	default:
+		/* No conversion needed */
+		break;
+	}
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct efx_mcdi_mon_attribute *mon_attr =
+		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+	unsigned int value;
+
+	value = mon_attr->limit_value;
+
+	switch (mon_attr->hwmon_type) {
+	case EFX_HWMON_TEMP:
+		/* Convert temperature from degrees to milli-degrees Celsius */
+		value *= 1000;
+		break;
+	case EFX_HWMON_POWER:
+		/* Convert power from watts to microwatts */
+		value *= 1000000;
+		break;
+	default:
+		/* No conversion needed */
+		break;
+	}
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct efx_mcdi_mon_attribute *mon_attr =
+		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+	efx_dword_t entry;
+	int state;
+	int rc;
+
+	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
+	if (rc)
+		return rc;
+
+	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+	return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
+}
+
+static ssize_t efx_mcdi_mon_show_label(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct efx_mcdi_mon_attribute *mon_attr =
+		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
+	return sprintf(buf, "%s\n",
+		       efx_mcdi_sensor_type[mon_attr->type].label);
+}
+
+static void
+efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
+		      ssize_t (*reader)(struct device *,
+					struct device_attribute *, char *),
+		      unsigned int index, unsigned int type,
+		      unsigned int limit_value)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
+
+	strlcpy(attr->name, name, sizeof(attr->name));
+	attr->index = index;
+	attr->type = type;
+	if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+		attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+	else
+		attr->hwmon_type = EFX_HWMON_UNKNOWN;
+	attr->limit_value = limit_value;
+	sysfs_attr_init(&attr->dev_attr.attr);
+	attr->dev_attr.attr.name = attr->name;
+	attr->dev_attr.attr.mode = 0444;
+	attr->dev_attr.show = reader;
+	hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
+}
+
+int efx_mcdi_mon_probe(struct efx_nic *efx)
+{
+	unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+	unsigned int n_pages, n_sensors, n_attrs, page;
+	size_t outlen;
+	char name[12];
+	u32 mask;
+	int rc, i, j, type;
+
+	/* Find out how many sensors are present */
+	n_sensors = 0;
+	page = 0;
+	do {
+		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
+
+		rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			return rc;
+		if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+			return -EIO;
+
+		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+		n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+		++page;
+	} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+	n_pages = page;
+
+	/* Don't create a device if there are none */
+	if (n_sensors == 0)
+		return 0;
+
+	rc = efx_nic_alloc_buffer(
+		efx, &hwmon->dma_buf,
+		n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+		GFP_KERNEL);
+	if (rc)
+		return rc;
+
+	mutex_init(&hwmon->update_lock);
+	efx_mcdi_mon_update(efx);
+
+	/* Allocate space for the maximum possible number of
+	 * attributes for this set of sensors:
+	 * value, min, max, crit, alarm and label for each sensor.
+	 */
+	n_attrs = 6 * n_sensors;
+	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
+	if (!hwmon->attrs) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
+				     GFP_KERNEL);
+	if (!hwmon->group.attrs) {
+		rc = -ENOMEM;
+		goto fail;
+	}
+
+	for (i = 0, j = -1, type = -1; ; i++) {
+		enum efx_hwmon_type hwmon_type;
+		const char *hwmon_prefix;
+		unsigned hwmon_index;
+		u16 min1, max1, min2, max2;
+
+		/* Find next sensor type or exit if there is none */
+		do {
+			type++;
+
+			if ((type % 32) == 0) {
+				page = type / 32;
+				j = -1;
+				if (page == n_pages)
+					goto hwmon_register;
+
+				MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+					       page);
+				rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+						  inbuf, sizeof(inbuf),
+						  outbuf, sizeof(outbuf),
+						  &outlen);
+				if (rc)
+					goto fail;
+				if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+					rc = -EIO;
+					goto fail;
+				}
+
+				mask = (MCDI_DWORD(outbuf,
+						   SENSOR_INFO_OUT_MASK) &
+					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+
+				/* Check again for short response */
+				if (outlen <
+				    MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+					rc = -EIO;
+					goto fail;
+				}
+			}
+		} while (!(mask & (1 << type % 32)));
+		j++;
+
+		if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+			hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+			/* Skip sensors specific to a different port */
+			if (hwmon_type != EFX_HWMON_UNKNOWN &&
+			    efx_mcdi_sensor_type[type].port >= 0 &&
+			    efx_mcdi_sensor_type[type].port !=
+			    efx_port_num(efx))
+				continue;
+		} else {
+			hwmon_type = EFX_HWMON_UNKNOWN;
+		}
+
+		switch (hwmon_type) {
+		case EFX_HWMON_TEMP:
+			hwmon_prefix = "temp";
+			hwmon_index = ++n_temp; /* 1-based */
+			break;
+		case EFX_HWMON_COOL:
+			/* This is likely to be a heatsink, but there
+			 * is no convention for representing cooling
+			 * devices other than fans.
+			 */
+			hwmon_prefix = "fan";
+			hwmon_index = ++n_cool; /* 1-based */
+			break;
+		default:
+			hwmon_prefix = "in";
+			hwmon_index = n_in++; /* 0-based */
+			break;
+		case EFX_HWMON_CURR:
+			hwmon_prefix = "curr";
+			hwmon_index = ++n_curr; /* 1-based */
+			break;
+		case EFX_HWMON_POWER:
+			hwmon_prefix = "power";
+			hwmon_index = ++n_power; /* 1-based */
+			break;
+		}
+
+		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MIN1);
+		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MAX1);
+		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MIN2);
+		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
+					SENSOR_INFO_ENTRY, j, MAX2);
+
+		if (min1 != max1) {
+			snprintf(name, sizeof(name), "%s%u_input",
+				 hwmon_prefix, hwmon_index);
+			efx_mcdi_mon_add_attr(
+				efx, name, efx_mcdi_mon_show_value, i, type, 0);
+
+			if (hwmon_type != EFX_HWMON_POWER) {
+				snprintf(name, sizeof(name), "%s%u_min",
+					 hwmon_prefix, hwmon_index);
+				efx_mcdi_mon_add_attr(
+					efx, name, efx_mcdi_mon_show_limit,
+					i, type, min1);
+			}
+
+			snprintf(name, sizeof(name), "%s%u_max",
+				 hwmon_prefix, hwmon_index);
+			efx_mcdi_mon_add_attr(
+				efx, name, efx_mcdi_mon_show_limit,
+				i, type, max1);
+
+			if (min2 != max2) {
+				/* Assume max2 is critical value.
+				 * But we have no good way to expose min2.
+				 */
+				snprintf(name, sizeof(name), "%s%u_crit",
+					 hwmon_prefix, hwmon_index);
+				efx_mcdi_mon_add_attr(
+					efx, name, efx_mcdi_mon_show_limit,
+					i, type, max2);
+			}
+		}
+
+		snprintf(name, sizeof(name), "%s%u_alarm",
+			 hwmon_prefix, hwmon_index);
+		efx_mcdi_mon_add_attr(
+			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);
+
+		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+		    efx_mcdi_sensor_type[type].label) {
+			snprintf(name, sizeof(name), "%s%u_label",
+				 hwmon_prefix, hwmon_index);
+			efx_mcdi_mon_add_attr(
+				efx, name, efx_mcdi_mon_show_label, i, type, 0);
+		}
+	}
+
+hwmon_register:
+	hwmon->groups[0] = &hwmon->group;
+	hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
+							  KBUILD_MODNAME, NULL,
+							  hwmon->groups);
+	if (IS_ERR(hwmon->device)) {
+		rc = PTR_ERR(hwmon->device);
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	efx_mcdi_mon_remove(efx);
+	return rc;
+}
+
+void efx_mcdi_mon_remove(struct efx_nic *efx)
+{
+	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
+
+	if (hwmon->device)
+		hwmon_device_unregister(hwmon->device);
+	kfree(hwmon->attrs);
+	kfree(hwmon->group.attrs);
+	efx_nic_free_buffer(efx, &hwmon->dma_buf);
+}
+
+#endif /* CONFIG_SFC_MCDI_MON */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_pcol.h b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
new file mode 100644
index 000000000000..ff617b1b38d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_pcol.h
@@ -0,0 +1,21968 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2018 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+
+#ifndef MCDI_PCOL_H
+#define MCDI_PCOL_H
+
+/* Values to be written into FMCR_CZ_RESET_STATE_REG to control boot. */
+/* Power-on reset state */
+#define MC_FW_STATE_POR (1)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash. */
+#define MC_FW_WARM_BOOT_OK (2)
+/* The MC main image has started to boot. */
+#define MC_FW_STATE_BOOTING (4)
+/* The Scheduler has started. */
+#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* We have entered the main firmware via recovery mode.  This
+ * means that MC persistent data must be reinitialised, but that
+ * we shouldn't touch PCIe config. */
+#define MC_FW_RECOVERY_MODE_PCIE_INIT_OK (32)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
+
+/* Siena MC shared memory offsets */
+/* The 'doorbell' addresses are hard-wired to alert the MC when written */
+#define	MC_SMEM_P0_DOORBELL_OFST	0x000
+#define	MC_SMEM_P1_DOORBELL_OFST	0x004
+/* The rest of these are firmware-defined */
+#define	MC_SMEM_P0_PDU_OFST		0x008
+#define	MC_SMEM_P1_PDU_OFST		0x108
+#define	MC_SMEM_PDU_LEN			0x100
+#define	MC_SMEM_P0_PTP_TIME_OFST	0x7f0
+#define	MC_SMEM_P0_STATUS_OFST		0x7f8
+#define	MC_SMEM_P1_STATUS_OFST		0x7fc
+
+/* Values to be written to the per-port status dword in shared
+ * memory on reboot and assert */
+#define MC_STATUS_DWORD_REBOOT (0xb007b007)
+#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
+/* The current version of the MCDI protocol.
+ *
+ * Note that the ROM burnt into the card only talks V0, so at the very
+ * least every driver must support version 0 and MCDI_PCOL_VERSION
+ */
+#define MCDI_PCOL_VERSION 2
+
+/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
+
+/* MCDI version 1
+ *
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
+ * structure, filled in by the client.
+ *
+ *       0       7  8     16    20     22  23  24    31
+ *      | CODE | R | LEN | SEQ | Rsvd | E | R | XFLAGS |
+ *               |                      |   |
+ *               |                      |   \--- Response
+ *               |                      \------- Error
+ *               \------------------------------ Resync (always set)
+ *
+ * The client writes its request into MC shared memory, and rings the
+ * doorbell. Each request is completed either by the MC writing
+ * back into shared memory, or by writing out an event.
+ *
+ * All MCDI commands support completion by shared memory response. Each
+ * request may also contain additional data (accounted for by HEADER.LEN),
+ * and some responses may also contain additional data (again, accounted
+ * for by HEADER.LEN).
+ *
+ * Some MCDI commands support completion by event, in which any associated
+ * response data is included in the event.
+ *
+ * The protocol requires one response to be delivered for every request; a
+ * request should not be sent unless the response for the previous request
+ * has been received (either by polling shared memory, or by receiving
+ * an event).
+ */
+
+/** Request/Response structure */
+#define MCDI_HEADER_OFST 0
+#define MCDI_HEADER_CODE_LBN 0
+#define MCDI_HEADER_CODE_WIDTH 7
+#define MCDI_HEADER_RESYNC_LBN 7
+#define MCDI_HEADER_RESYNC_WIDTH 1
+#define MCDI_HEADER_DATALEN_LBN 8
+#define MCDI_HEADER_DATALEN_WIDTH 8
+#define MCDI_HEADER_SEQ_LBN 16
+#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
+#define MCDI_HEADER_ERROR_LBN 22
+#define MCDI_HEADER_ERROR_WIDTH 1
+#define MCDI_HEADER_RESPONSE_LBN 23
+#define MCDI_HEADER_RESPONSE_WIDTH 1
+#define MCDI_HEADER_XFLAGS_LBN 24
+#define MCDI_HEADER_XFLAGS_WIDTH 8
+/* Request response using event */
+#define MCDI_HEADER_XFLAGS_EVREQ 0x01
+/* Request (and signal) early doorbell return */
+#define MCDI_HEADER_XFLAGS_DBRET 0x02
+
+/* Maximum number of payload bytes */
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
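
To make the header layout concrete, the driver assembles this dword with the EFX_POPULATE_DWORD_*() helpers from bitfield.h. A simplified sketch; the NOT_EPOCH flag and the v2 extended header are deliberately omitted, and the helper name is illustrative:

/* Illustrative only: build a v1 MCDI header for command 'cmd' carrying
 * 'inlen' payload bytes (must fit DATALEN, i.e. <= MCDI_CTL_SDU_LEN_MAX_V1),
 * requesting completion by event.
 */
static void example_build_mcdi_header(efx_dword_t *hdr, unsigned int cmd,
				      size_t inlen, unsigned int seqno)
{
	EFX_POPULATE_DWORD_6(*hdr,
			     MCDI_HEADER_RESPONSE, 0,
			     MCDI_HEADER_RESYNC, 1,
			     MCDI_HEADER_CODE, cmd,
			     MCDI_HEADER_DATALEN, inlen,
			     MCDI_HEADER_SEQ, seqno,
			     MCDI_HEADER_XFLAGS, MCDI_HEADER_XFLAGS_EVREQ);
}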
+
+
+/* The MC can generate events for two reasons:
+ *   - To advance a shared memory request if XFLAGS_EVREQ was set
+ *   - As a notification (link state, i2c event), controlled
+ *     via MC_CMD_LOG_CTRL
+ *
+ * Both events share a common structure:
+ *
+ *  0      32     33      36    44     52     60
+ * | Data | Cont | Level | Src | Code | Rsvd |
+ *           |
+ *           \ There is another event pending in this notification
+ *
+ * If Code==CMDDONE, then the fields are further interpreted as:
+ *
+ *   - LEVEL==INFO    Command succeeded
+ *   - LEVEL==ERR     Command failed
+ *
+ *    0     8         16      24     32
+ *   | Seq | Datalen | Errno | Rsvd |
+ *
+ *   These fields are taken directly out of the standard MCDI header, i.e.,
+ *   LEVEL==ERR, Datalen == 0 => Reboot
+ *
+ * Events can be squirted out of the UART (using LOG_CTRL) without an
+ * MCDI header.  An event can be distinguished from an MCDI response by
+ * examining the first byte, which is 0xc0.  This corresponds to the
+ * non-existent MCDI command MC_CMD_DEBUG_LOG.
+ *
+ *      0         7        8
+ *     | command | Resync |     = 0xc0
+ *
+ * Since the event is written in big-endian byte order, this works
+ * providing bits 56-63 of the event are 0xc0.
+ *
+ *      56     60  63
+ *     | Rsvd | Code |    = 0xc0
+ *
+ * This means that, for convenience, the event code is 0xc for all
+ * MC-generated events.
+ */
+#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
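+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It checks for an MCDI completion event and pulls out the CMDDONE errno,
+ * using the bit positions from the diagrams above; the canonical
+ * MCDI_EVENT_* field macros appear later in this file.  The helper names
+ * are hypothetical and the u64/bool types are assumed from linux/types.h.
+ */
+static inline bool mcdi_ev_is_mcdi_response(u64 ev)
+{
+	/* Bits 60-63 of the event hold the event queue code; 0xc marks MCDI */
+	return ((ev >> 60) & 0xf) == FSE_AZ_EV_CODE_MCDI_EVRESPONSE;
+}
+
+static inline unsigned int mcdi_ev_cmddone_errno(u64 ev)
+{
+	/* For CMDDONE events the errno sits at bits 16-23 of the 32-bit data
+	 * field, as shown in the diagram above.
+	 */
+	return (ev >> 16) & 0xff;
+}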
+
+
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
+/* Non-existent command target */
+#define MC_CMD_ERR_ENOENT 2
+/* assert() has killed the MC */
+#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Already exists */
+#define MC_CMD_ERR_EEXIST 6
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
+/* Caller does not hold required locks */
+#define MC_CMD_ERR_EACCES 13
+/* Resource is currently unavailable (e.g. lock contention) */
+#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
+/* Invalid argument to target */
+#define MC_CMD_ERR_EINVAL 22
+/* Broken pipe */
+#define MC_CMD_ERR_EPIPE 32
+/* Read-only */
+#define MC_CMD_ERR_EROFS 30
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
+/* Non-recursive resource is already acquired */
+#define MC_CMD_ERR_EDEADLK 35
+/* Operation not implemented */
+#define MC_CMD_ERR_ENOSYS 38
+/* Operation timed out */
+#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL  0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH  0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT  0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
+/* The datapath is disabled. */
+#define MC_CMD_ERR_DATAPATH_DISABLED 0x100b
+/* The requesting client is not a function */
+#define MC_CMD_ERR_CLIENT_NOT_FN  0x100c
+/* The requested operation might require the
+   command to be passed between MCs, and the
+   transport doesn't support that.  It should
+   only ever be seen over the UART. */
+#define MC_CMD_ERR_TRANSPORT_NOPROXY 0x100d
+/* VLAN tag(s) exists */
+#define MC_CMD_ERR_VLAN_EXIST 0x100e
+/* No MAC address assigned to an EVB port */
+#define MC_CMD_ERR_NO_MAC_ADDR 0x100f
+/* Notifies the driver that the request has been relayed
+ * to an admin function for authorization. The driver should
+ * wait for a PROXY_RESPONSE event and then resend its request.
+ * This error code is followed by a 32-bit handle that
+ * helps match it with the respective PROXY_RESPONSE event (see the
+ * parsing sketch after MC_CMD_ERR_CODE_OFST below). */
+#define MC_CMD_ERR_PROXY_PENDING 0x1010
+#define MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST 4
+/* The request cannot be passed for authorization because
+ * another request from the same function is currently being
+ * authorized. The driver should try again later. */
+#define MC_CMD_ERR_PROXY_INPROGRESS 0x1011
+/* Returned by MC_CMD_PROXY_COMPLETE if the caller is not the function
+ * that has enabled proxying or BLOCK_INDEX points to a function that
+ * doesn't await an authorization. */
+#define MC_CMD_ERR_PROXY_UNEXPECTED 0x1012
+/* This code is currently only used internally in FW. Its meaning is that
+ * an operation failed due to lack of SR-IOV privilege.
+ * Normally it is translated to EPERM by send_cmd_err(),
+ * but it may also be used to trigger some special mechanism
+ * for handling such a case, e.g. to relay the failed request
+ * to a designated admin function for authorization. */
+#define MC_CMD_ERR_NO_PRIVILEGE 0x1013
+/* Workaround 26807 could not be turned on/off because some functions
+ * have already installed filters. See the comment at
+ * MC_CMD_WORKAROUND_BUG26807.
+ * It may also be returned for other operations such as sub-variant
+ * switching. */
+#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
+/* Returned by MC_CMD_TESTASSERT if the action that should
+ * have caused an assertion failed to do so.  */
+#define MC_CMD_ERR_UNREACHABLE 0x1016
+/* This command needs to be processed in the background but there were no
+ * resources to do so. Send it again after a command has completed. */
+#define MC_CMD_ERR_QUEUE_FULL 0x1017
+/* The operation could not be completed because the PCIe link has gone
+ * away.  This error code is never expected to be returned over the TLP
+ * transport. */
+#define MC_CMD_ERR_NO_PCIE 0x1018
+/* The operation could not be completed because the datapath has gone
+ * away.  This is distinct from MC_CMD_ERR_DATAPATH_DISABLED in that the
+ * datapath absence may be temporary. */
+#define MC_CMD_ERR_NO_DATAPATH 0x1019
+/* The operation could not complete because some VIs are allocated */
+#define MC_CMD_ERR_VIS_PRESENT 0x101a
+/* The operation could not complete because some PIO buffers are allocated */
+#define MC_CMD_ERR_PIOBUFS_PRESENT 0x101b
+
+#define MC_CMD_ERR_CODE_OFST 0
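+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It shows one way an error response could be parsed using the offsets
+ * above: the errno lives at MC_CMD_ERR_CODE_OFST and, for PROXY_PENDING,
+ * the 32-bit handle follows at MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST.  The
+ * helper names and the u8/u32/bool types (from linux/types.h) are
+ * assumptions.
+ */
+static inline u32 mcdi_err_dword(const u8 *err_resp, unsigned int ofst)
+{
+	const u8 *p = err_resp + ofst;
+
+	/* MCDI payload dwords are little-endian on the wire */
+	return p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
+}
+
+static inline bool mcdi_err_is_proxy_pending(const u8 *err_resp, u32 *handle)
+{
+	if (mcdi_err_dword(err_resp, MC_CMD_ERR_CODE_OFST) !=
+	    MC_CMD_ERR_PROXY_PENDING)
+		return false;
+	*handle = mcdi_err_dword(err_resp,
+				 MC_CMD_ERR_PROXY_PENDING_HANDLE_OFST);
+	return true;
+}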
+
+/* We define 8 "escape" commands to allow
+   for command number space extension */
+
+#define MC_CMD_CMD_SPACE_ESCAPE_0	      0x78
+#define MC_CMD_CMD_SPACE_ESCAPE_1	      0x79
+#define MC_CMD_CMD_SPACE_ESCAPE_2	      0x7A
+#define MC_CMD_CMD_SPACE_ESCAPE_3	      0x7B
+#define MC_CMD_CMD_SPACE_ESCAPE_4	      0x7C
+#define MC_CMD_CMD_SPACE_ESCAPE_5	      0x7D
+#define MC_CMD_CMD_SPACE_ESCAPE_6	      0x7E
+#define MC_CMD_CMD_SPACE_ESCAPE_7	      0x7F
+
+/* Vectors in the boot ROM */
+/* Points to the copycode entry point. */
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
+/* Points to the recovery mode entry point. Misnamed but kept for compatibility. */
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
+/* Points to the recovery mode entry point. Same as above, but the right name. */
+#define SIENA_MC_BOOTROM_RECOVERY_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_RECOVERY_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_RECOVERY_VEC (0x10000 - 2 * 0x4)
+
+/* Points to noflash mode entry point. */
+#define MEDFORD_MC_BOOTROM_REAL_NOFLASH_VEC (0x10000 - 4 * 0x4)
+
+/* The command set exported by the boot ROM (MCDI v0) */
+#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS {		\
+	(1 << MC_CMD_READ32)	|			\
+	(1 << MC_CMD_WRITE32)	|			\
+	(1 << MC_CMD_COPYCODE)	|			\
+	(1 << MC_CMD_GET_VERSION),			\
+	0, 0, 0 }
+
+#define MC_CMD_SENSOR_INFO_OUT_OFFSET_OFST(_x)		\
+	(MC_CMD_SENSOR_ENTRY_OFST + (_x))
+
+#define MC_CMD_DBI_WRITE_IN_ADDRESS_OFST(n)		\
+	(MC_CMD_DBI_WRITE_IN_DBIWROP_OFST +		\
+	 MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST +		\
+	 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_BYTE_MASK_OFST(n)		\
+	(MC_CMD_DBI_WRITE_IN_DBIWROP_OFST +		\
+	 MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST +	\
+	 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+#define MC_CMD_DBI_WRITE_IN_VALUE_OFST(n)		\
+	(MC_CMD_DBI_WRITE_IN_DBIWROP_OFST +		\
+	 MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST +		\
+	 (n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+
+/* This may be ORed with an EVB_PORT_ID_xxx constant to pass a non-default
+ * stack ID (which must be in the range 1-255) along with an EVB port ID.
+ */
+#define EVB_STACK_ID(n)  (((n) & 0xff) << 16)
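+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It shows EVB_STACK_ID() being OR'd with an EVB port ID as described above.
+ * The helper name is hypothetical and u32 is assumed from linux/types.h.
+ */
+static inline u32 evb_port_with_stack(u32 evb_port_id, unsigned int stack_id)
+{
+	/* stack_id must be in the range 1-255; 0 selects the default stack */
+	return evb_port_id | EVB_STACK_ID(stack_id);
+}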
+
+
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
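+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It reads the optional v2 argument index that may follow the errno, as
+ * described above.  The helper name and the u8/u32/bool types (from
+ * linux/types.h) are assumptions.
+ */
+static inline bool mcdi_v2_err_arg(const u8 *err_resp, unsigned int resp_len,
+				   u32 *arg)
+{
+	const u8 *p = err_resp + MC_CMD_ERR_ARG_OFST;
+
+	if (resp_len < MC_CMD_ERR_ARG_OFST + 4)
+		return false;	/* argument index not present */
+	/* MCDI payload dwords are little-endian on the wire */
+	*arg = p[0] | (p[1] << 8) | (p[2] << 16) | ((u32)p[3] << 24);
+	return true;
+}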
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
+/* MCDI_EVENT structuredef */
+#define    MCDI_EVENT_LEN 8
+#define       MCDI_EVENT_CONT_LBN 32
+#define       MCDI_EVENT_CONT_WIDTH 1
+#define       MCDI_EVENT_LEVEL_LBN 33
+#define       MCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define          MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MCDI_EVENT_LEVEL_FATAL 0x3
+#define       MCDI_EVENT_DATA_OFST 0
+#define       MCDI_EVENT_DATA_LEN 4
+#define        MCDI_EVENT_CMDDONE_SEQ_OFST 0
+#define        MCDI_EVENT_CMDDONE_SEQ_LBN 0
+#define        MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
+#define        MCDI_EVENT_CMDDONE_DATALEN_OFST 0
+#define        MCDI_EVENT_CMDDONE_DATALEN_LBN 8
+#define        MCDI_EVENT_CMDDONE_DATALEN_WIDTH 8
+#define        MCDI_EVENT_CMDDONE_ERRNO_OFST 0
+#define        MCDI_EVENT_CMDDONE_ERRNO_LBN 16
+#define        MCDI_EVENT_CMDDONE_ERRNO_WIDTH 8
+#define        MCDI_EVENT_LINKCHANGE_LP_CAP_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_LP_CAP_LBN 0
+#define        MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
+#define        MCDI_EVENT_LINKCHANGE_SPEED_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
+#define        MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
+/* enum: Link is down or link speed could not be determined */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_UNKNOWN 0x0
+/* enum: 100Mbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
+/* enum: 25Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_25G 0x5
+/* enum: 50Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_50G 0x6
+/* enum: 100Gbs */
+#define          MCDI_EVENT_LINKCHANGE_SPEED_100G 0x7
+#define        MCDI_EVENT_LINKCHANGE_FCNTL_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
+#define        MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
+#define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
+#define        MCDI_EVENT_LINKCHANGE_LINK_FLAGS_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_MONITOR_OFST 0
+#define        MCDI_EVENT_SENSOREVT_MONITOR_LBN 0
+#define        MCDI_EVENT_SENSOREVT_MONITOR_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_STATE_OFST 0
+#define        MCDI_EVENT_SENSOREVT_STATE_LBN 8
+#define        MCDI_EVENT_SENSOREVT_STATE_WIDTH 8
+#define        MCDI_EVENT_SENSOREVT_VALUE_OFST 0
+#define        MCDI_EVENT_SENSOREVT_VALUE_LBN 16
+#define        MCDI_EVENT_SENSOREVT_VALUE_WIDTH 16
+#define        MCDI_EVENT_FWALERT_DATA_OFST 0
+#define        MCDI_EVENT_FWALERT_DATA_LBN 8
+#define        MCDI_EVENT_FWALERT_DATA_WIDTH 24
+#define        MCDI_EVENT_FWALERT_REASON_OFST 0
+#define        MCDI_EVENT_FWALERT_REASON_LBN 0
+#define        MCDI_EVENT_FWALERT_REASON_WIDTH 8
+/* enum: SRAM Access. */
+#define          MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
+#define        MCDI_EVENT_FLR_VF_OFST 0
+#define        MCDI_EVENT_FLR_VF_LBN 0
+#define        MCDI_EVENT_FLR_VF_WIDTH 8
+#define        MCDI_EVENT_TX_ERR_TXQ_OFST 0
+#define        MCDI_EVENT_TX_ERR_TXQ_LBN 0
+#define        MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
+#define        MCDI_EVENT_TX_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_TX_ERR_TYPE_LBN 12
+#define        MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
+/* enum: Descriptor loader reported failure */
+#define          MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define          MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define          MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define          MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define          MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define          MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
+#define        MCDI_EVENT_TX_ERR_INFO_OFST 0
+#define        MCDI_EVENT_TX_ERR_INFO_LBN 16
+#define        MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_OFST 0
+#define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define        MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
+#define        MCDI_EVENT_TX_FLUSH_TXQ_OFST 0
+#define        MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
+#define        MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
+#define        MCDI_EVENT_PTP_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_PTP_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
+/* enum: PLL lost lock */
+#define          MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define          MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define          MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define          MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define        MCDI_EVENT_AOE_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define          MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define          MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define          MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define          MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define          MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define          MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define          MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define          MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define          MCDI_EVENT_AOE_BYTEBLASTER 0x9
+/* enum: DDR ECC status update */
+#define          MCDI_EVENT_AOE_DDR_ECC_STATUS 0xa
+/* enum: PTP status update */
+#define          MCDI_EVENT_AOE_PTP_STATUS 0xb
+/* enum: FPGA header incorrect */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_HEADER_ERR 0xc
+/* enum: FPGA Powered Off due to error in powering up FPGA */
+#define          MCDI_EVENT_AOE_FPGA_POWER_OFF 0xd
+/* enum: AOE FPGA load failed due to MC to MUM communication failure */
+#define          MCDI_EVENT_AOE_FPGA_LOAD_FAILED 0xe
+/* enum: Notify that an invalid flash type was detected */
+#define          MCDI_EVENT_AOE_INVALID_FPGA_FLASH_TYPE 0xf
+/* enum: Notify that the attempt to run FPGA Controller firmware timed out */
+#define          MCDI_EVENT_AOE_FC_RUN_TIMEDOUT 0x10
+/* enum: Failure to probe one or more FPGA boot flash chips */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_INVALID 0x11
+/* enum: FPGA boot-flash contains an invalid image header */
+#define          MCDI_EVENT_AOE_FPGA_BOOT_FLASH_HDR_INVALID 0x12
+/* enum: Failed to program clocks required by the FPGA */
+#define          MCDI_EVENT_AOE_FPGA_CLOCKS_PROGRAM_FAILED 0x13
+/* enum: Notify that FPGA Controller is alive to serve MCDI requests */
+#define          MCDI_EVENT_AOE_FC_RUNNING 0x14
+#define        MCDI_EVENT_AOE_ERR_DATA_OFST 0
+#define        MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define        MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_OFST 0
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_FC_ASSERT_INFO_WIDTH 8
+/* enum: FC Assert happened, but the register information is not available */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_SEEN 0x0
+/* enum: The register information for FC Assert is ready for reading by the
+ * driver
+ */
+#define          MCDI_EVENT_AOE_ERR_FC_ASSERT_DATA_READY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_OFST 0
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_HEADER_VERIFY_FAILED_WIDTH 8
+/* enum: Reading from NV failed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_NV_READ_FAIL 0x0
+/* enum: Invalid Magic Number in FPGA header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_MAGIC_FAIL 0x1
+/* enum: Invalid Silicon type detected in header */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_SILICON_TYPE 0x2
+/* enum: Unsupported VRatio */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_VRATIO 0x3
+/* enum: Unsupported DDR Type */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_TYPE 0x4
+/* enum: DDR Voltage out of supported range */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_VOLTAGE 0x5
+/* enum: Unsupported DDR speed */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SPEED 0x6
+/* enum: Unsupported DDR size */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_SIZE 0x7
+/* enum: Unsupported DDR rank */
+#define          MCDI_EVENT_AOE_ERR_FPGA_HEADER_DDR_RANK 0x8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_OFST 0
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_INVALID_FPGA_FLASH_TYPE_INFO_WIDTH 8
+/* enum: Primary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_PRIMARY 0x0
+/* enum: Secondary boot flash */
+#define          MCDI_EVENT_AOE_FLASH_TYPE_BOOT_SECONDARY 0x1
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_OFST 0
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_POWER_OFF_WIDTH 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_OFST 0
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_LBN 8
+#define        MCDI_EVENT_AOE_ERR_CODE_FPGA_LOAD_FAILED_WIDTH 8
+#define        MCDI_EVENT_RX_ERR_RXQ_OFST 0
+#define        MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define        MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define        MCDI_EVENT_RX_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define        MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define        MCDI_EVENT_RX_ERR_INFO_OFST 0
+#define        MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define        MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_OFST 0
+#define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define        MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define        MCDI_EVENT_RX_FLUSH_RXQ_OFST 0
+#define        MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define        MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define        MCDI_EVENT_MC_REBOOT_COUNT_OFST 0
+#define        MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define        MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
+#define        MCDI_EVENT_MUM_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_MUM_ERR_TYPE_WIDTH 8
+/* enum: MUM failed to load - no valid image? */
+#define          MCDI_EVENT_MUM_NO_LOAD 0x1
+/* enum: MUM f/w reported an exception */
+#define          MCDI_EVENT_MUM_ASSERT 0x2
+/* enum: MUM not kicking watchdog */
+#define          MCDI_EVENT_MUM_WATCHDOG 0x3
+#define        MCDI_EVENT_MUM_ERR_DATA_OFST 0
+#define        MCDI_EVENT_MUM_ERR_DATA_LBN 8
+#define        MCDI_EVENT_MUM_ERR_DATA_WIDTH 8
+#define        MCDI_EVENT_DBRET_SEQ_OFST 0
+#define        MCDI_EVENT_DBRET_SEQ_LBN 0
+#define        MCDI_EVENT_DBRET_SEQ_WIDTH 8
+#define        MCDI_EVENT_SUC_ERR_TYPE_OFST 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_LBN 0
+#define        MCDI_EVENT_SUC_ERR_TYPE_WIDTH 8
+/* enum: Corrupted or bad SUC application. */
+#define          MCDI_EVENT_SUC_BAD_APP 0x1
+/* enum: SUC application reported an assert. */
+#define          MCDI_EVENT_SUC_ASSERT 0x2
+/* enum: SUC application reported an exception. */
+#define          MCDI_EVENT_SUC_EXCEPTION 0x3
+/* enum: SUC watchdog timer expired. */
+#define          MCDI_EVENT_SUC_WATCHDOG 0x4
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_OFST 0
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_LBN 8
+#define        MCDI_EVENT_SUC_ERR_ADDRESS_WIDTH 24
+#define        MCDI_EVENT_SUC_ERR_DATA_OFST 0
+#define        MCDI_EVENT_SUC_ERR_DATA_LBN 8
+#define        MCDI_EVENT_SUC_ERR_DATA_WIDTH 24
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_LBN 0
+#define        MCDI_EVENT_LINKCHANGE_V2_LP_CAP_WIDTH 24
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_LBN 24
+#define        MCDI_EVENT_LINKCHANGE_V2_SPEED_WIDTH 4
+/*             Enum values, see field(s): */
+/*                MCDI_EVENT/LINKCHANGE_SPEED */
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_LBN 28
+#define        MCDI_EVENT_LINKCHANGE_V2_FLAGS_LINK_UP_WIDTH 1
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_OFST 0
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_LBN 29
+#define        MCDI_EVENT_LINKCHANGE_V2_FCNTL_WIDTH 3
+/*             Enum values, see field(s): */
+/*                MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_OFST 0
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_LBN 0
+#define        MCDI_EVENT_MODULECHANGE_LD_CAP_WIDTH 30
+#define        MCDI_EVENT_MODULECHANGE_SEQ_OFST 0
+#define        MCDI_EVENT_MODULECHANGE_SEQ_LBN 30
+#define        MCDI_EVENT_MODULECHANGE_SEQ_WIDTH 2
+#define       MCDI_EVENT_DATA_LBN 0
+#define       MCDI_EVENT_DATA_WIDTH 32
+/* Alias for PTP_DATA. */
+#define       MCDI_EVENT_SRC_LBN 36
+#define       MCDI_EVENT_SRC_WIDTH 8
+/* Data associated with PTP events which doesn't fit into the main DATA field
+ */
+#define       MCDI_EVENT_PTP_DATA_LBN 36
+#define       MCDI_EVENT_PTP_DATA_WIDTH 8
+/* EF100 specific. Defined by QDMA. The phase bit changes each time round the
+ * event ring.
+ */
+#define       MCDI_EVENT_EV_EVQ_PHASE_LBN 59
+#define       MCDI_EVENT_EV_EVQ_PHASE_WIDTH 1
+#define       MCDI_EVENT_EV_CODE_LBN 60
+#define       MCDI_EVENT_EV_CODE_WIDTH 4
+#define       MCDI_EVENT_CODE_LBN 44
+#define       MCDI_EVENT_CODE_WIDTH 8
+/* enum: Event generated by host software */
+#define          MCDI_EVENT_SW_EVENT 0x0
+/* enum: Bad assert. */
+#define          MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define          MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define          MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define          MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define          MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define          MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define          MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define          MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define          MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define          MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define          MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define          MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define          MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define          MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define          MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define          MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define          MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define          MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define          MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define          MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define          MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define          MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define          MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define          MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: The MC has entered offline BIST mode */
+#define          MCDI_EVENT_CODE_MC_BIST 0x19
+/* enum: PTP tick event providing current NIC time */
+#define          MCDI_EVENT_CODE_PTP_TIME 0x1a
+/* enum: MUM fault */
+#define          MCDI_EVENT_CODE_MUM 0x1b
+/* enum: notify the designated PF of a new authorization request */
+#define          MCDI_EVENT_CODE_PROXY_REQUEST 0x1c
+/* enum: notify a function that awaits an authorization that its request has
+ * been processed and it may now resend the command
+ */
+#define          MCDI_EVENT_CODE_PROXY_RESPONSE 0x1d
+/* enum: MCDI command accepted. New commands can be issued but this command is
+ * not done yet.
+ */
+#define          MCDI_EVENT_CODE_DBRET 0x1e
+/* enum: The MC has detected a fault on the SUC */
+#define          MCDI_EVENT_CODE_SUC 0x1f
+/* enum: Link change. This event is sent instead of LINKCHANGE if
+ * WANT_V2_LINKCHANGES was set on driver attach.
+ */
+#define          MCDI_EVENT_CODE_LINKCHANGE_V2 0x20
+/* enum: This event is sent if WANT_V2_LINKCHANGES was set on driver attach
+ * when the local device capabilities change. This will usually correspond to
+ * a module change.
+ */
+#define          MCDI_EVENT_CODE_MODULECHANGE 0x21
+/* enum: Notification that the sensors have been added and/or removed from the
+ * sensor table. This event includes the new sensor table generation count; if
+ * this does not match the driver's local copy, it is expected to call
+ * DYNAMIC_SENSORS_LIST to refresh it.
+ */
+#define          MCDI_EVENT_CODE_DYNAMIC_SENSORS_CHANGE 0x22
+/* enum: Notification that a sensor has changed state as a result of a reading
+ * crossing a threshold. This is sent as two events, the first event contains
+ * the handle and the sensor's state (in the SRC field), and the second
+ * contains the value.
+ */
+#define          MCDI_EVENT_CODE_DYNAMIC_SENSORS_STATE_CHANGE 0x23
+/* enum: Notification that a descriptor proxy function configuration has been
+ * pushed to "live" status (visible to host). SRC field contains the handle of
+ * the affected descriptor proxy function. DATA field contains the generation
+ * count of configuration set applied. See MC_CMD_DESC_PROXY_FUNC_CONFIG_SET /
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_COMMIT and SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_CONFIG_COMMITTED 0x24
+/* enum: Notification that a descriptor proxy function has been reset. SRC
+ * field contains the handle of the affected descriptor proxy function. See
+ * SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_RESET 0x25
+/* enum: Notification that a driver attached to a descriptor proxy function.
+ * SRC field contains the handle of the affected descriptor proxy function. For
+ * Virtio proxy functions this message consists of two MCDI events, where the
+ * first event's (CONT=1) DATA field carries negotiated virtio feature bits 0
+ * to 31 and the second (CONT=0) carries bits 32 to 63. For EF100 proxy
+ * functions event length and meaning of DATA field is not yet defined. See
+ * SF-122927-TC for details.
+ */
+#define          MCDI_EVENT_CODE_DESC_PROXY_FUNC_DRIVER_ATTACH 0x26
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define          MCDI_EVENT_CODE_TESTGEN 0xfa
+#define       MCDI_EVENT_CMDDONE_DATA_OFST 0
+#define       MCDI_EVENT_CMDDONE_DATA_LEN 4
+#define       MCDI_EVENT_CMDDONE_DATA_LBN 0
+#define       MCDI_EVENT_CMDDONE_DATA_WIDTH 32
+#define       MCDI_EVENT_LINKCHANGE_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_LEN 4
+#define       MCDI_EVENT_LINKCHANGE_DATA_LBN 0
+#define       MCDI_EVENT_LINKCHANGE_DATA_WIDTH 32
+#define       MCDI_EVENT_SENSOREVT_DATA_OFST 0
+#define       MCDI_EVENT_SENSOREVT_DATA_LEN 4
+#define       MCDI_EVENT_SENSOREVT_DATA_LBN 0
+#define       MCDI_EVENT_SENSOREVT_DATA_WIDTH 32
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_OFST 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LEN 4
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_LBN 0
+#define       MCDI_EVENT_MAC_STATS_DMA_GENERATION_WIDTH 32
+#define       MCDI_EVENT_TX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_TX_ERR_DATA_LEN 4
+#define       MCDI_EVENT_TX_ERR_DATA_LBN 0
+#define       MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the seconds field of
+ * timestamp
+ */
+#define       MCDI_EVENT_PTP_SECONDS_OFST 0
+#define       MCDI_EVENT_PTP_SECONDS_LEN 4
+#define       MCDI_EVENT_PTP_SECONDS_LBN 0
+#define       MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the major field of
+ * timestamp
+ */
+#define       MCDI_EVENT_PTP_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_MAJOR_LEN 4
+#define       MCDI_EVENT_PTP_MAJOR_LBN 0
+#define       MCDI_EVENT_PTP_MAJOR_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the nanoseconds field
+ * of timestamp
+ */
+#define       MCDI_EVENT_PTP_NANOSECONDS_OFST 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_LEN 4
+#define       MCDI_EVENT_PTP_NANOSECONDS_LBN 0
+#define       MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* For CODE_PTP_RX, CODE_PTP_PPS and CODE_HW_PPS events the minor field of
+ * timestamp
+ */
+#define       MCDI_EVENT_PTP_MINOR_OFST 0
+#define       MCDI_EVENT_PTP_MINOR_LEN 4
+#define       MCDI_EVENT_PTP_MINOR_LBN 0
+#define       MCDI_EVENT_PTP_MINOR_WIDTH 32
+/* For CODE_PTP_RX events, the lowest four bytes of sourceUUID from PTP packet
+ */
+#define       MCDI_EVENT_PTP_UUID_OFST 0
+#define       MCDI_EVENT_PTP_UUID_LEN 4
+#define       MCDI_EVENT_PTP_UUID_LBN 0
+#define       MCDI_EVENT_PTP_UUID_WIDTH 32
+#define       MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define       MCDI_EVENT_RX_ERR_DATA_LEN 4
+#define       MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define       MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define       MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_PAR_ERR_DATA_LEN 4
+#define       MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define       MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LEN 4
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define       MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LEN 4
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define       MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+/* For CODE_PTP_TIME events, the major value of the PTP clock */
+#define       MCDI_EVENT_PTP_TIME_MAJOR_OFST 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LEN 4
+#define       MCDI_EVENT_PTP_TIME_MAJOR_LBN 0
+#define       MCDI_EVENT_PTP_TIME_MAJOR_WIDTH 32
+/* For CODE_PTP_TIME events, bits 19-26 of the minor value of the PTP clock */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_19_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_19_WIDTH 8
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_19.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_LBN 36
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_8BITS_WIDTH 8
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC clock has ever been set
+ */
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_LBN 36
+#define       MCDI_EVENT_PTP_TIME_NIC_CLOCK_VALID_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, indicates
+ * whether the NIC and System clocks are in sync
+ */
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_LBN 37
+#define       MCDI_EVENT_PTP_TIME_HOST_NIC_IN_SYNC_WIDTH 1
+/* For CODE_PTP_TIME events where report sync status is enabled, bits 21-26 of
+ * the minor value of the PTP clock
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_26_21_WIDTH 6
+/* For CODE_PTP_TIME events, most significant bits of the minor value of the
+ * PTP clock. This is a more generic equivalent of PTP_TIME_MINOR_26_21.
+ */
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_LBN 38
+#define       MCDI_EVENT_PTP_TIME_MINOR_MS_6BITS_WIDTH 6
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_OFST 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LEN 4
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_LBN 0
+#define       MCDI_EVENT_PROXY_REQUEST_BUFF_INDEX_WIDTH 32
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_OFST 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LEN 4
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_LBN 0
+#define       MCDI_EVENT_PROXY_RESPONSE_HANDLE_WIDTH 32
+/* Zero means that the request has been completed or authorized, and the driver
+ * should resend it. A non-zero value means that the authorization has been
+ * denied, and gives the reason. Typically it will be EPERM.
+ */
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_LBN 36
+#define       MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH 8
+#define       MCDI_EVENT_DBRET_DATA_OFST 0
+#define       MCDI_EVENT_DBRET_DATA_LEN 4
+#define       MCDI_EVENT_DBRET_DATA_LBN 0
+#define       MCDI_EVENT_DBRET_DATA_WIDTH 32
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_OFST 0
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_LEN 4
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_LBN 0
+#define       MCDI_EVENT_LINKCHANGE_V2_DATA_WIDTH 32
+#define       MCDI_EVENT_MODULECHANGE_DATA_OFST 0
+#define       MCDI_EVENT_MODULECHANGE_DATA_LEN 4
+#define       MCDI_EVENT_MODULECHANGE_DATA_LBN 0
+#define       MCDI_EVENT_MODULECHANGE_DATA_WIDTH 32
+/* The new generation count after a sensor has been added or deleted. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_GENERATION_WIDTH 32
+/* The handle of a dynamic sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_HANDLE_WIDTH 32
+/* The current values of a sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_OFST 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LEN 4
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_LBN 0
+#define       MCDI_EVENT_DYNAMIC_SENSORS_VALUE_WIDTH 32
+/* The current state of a sensor. */
+#define       MCDI_EVENT_DYNAMIC_SENSORS_STATE_LBN 36
+#define       MCDI_EVENT_DYNAMIC_SENSORS_STATE_WIDTH 8
+#define       MCDI_EVENT_DESC_PROXY_DATA_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_DATA_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_DATA_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_DATA_WIDTH 32
+/* Generation count of applied configuration set */
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_GENERATION_WIDTH 32
+/* Virtio features negotiated with the host driver. First event (CONT=1)
+ * carries bits 0 to 31. Second event (CONT=0) carries bits 32 to 63.
+ */
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_OFST 0
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LEN 4
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_LBN 0
+#define       MCDI_EVENT_DESC_PROXY_VIRTIO_FEATURES_WIDTH 32
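+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It decodes two of the DATA sub-fields defined in the structuredef above:
+ * the LINKCHANGE speed code and the PROXY_RESPONSE return code.  The helper
+ * names are hypothetical and the u32/u64 types are assumed from
+ * linux/types.h.
+ */
+static inline unsigned int mcdi_ev_linkchange_speed(u32 ev_data)
+{
+	/* Returns one of the MCDI_EVENT_LINKCHANGE_SPEED_* enum values */
+	return (ev_data >> MCDI_EVENT_LINKCHANGE_SPEED_LBN) &
+	       ((1U << MCDI_EVENT_LINKCHANGE_SPEED_WIDTH) - 1);
+}
+
+static inline unsigned int mcdi_ev_proxy_response_rc(u64 ev)
+{
+	/* Zero means the request was authorized and should be resent;
+	 * a non-zero value is the denial reason (typically EPERM).
+	 */
+	return (ev >> MCDI_EVENT_PROXY_RESPONSE_RC_LBN) &
+	       ((1U << MCDI_EVENT_PROXY_RESPONSE_RC_WIDTH) - 1);
+}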
+
+/* FCDI_EVENT structuredef */
+#define    FCDI_EVENT_LEN 8
+#define       FCDI_EVENT_CONT_LBN 32
+#define       FCDI_EVENT_CONT_WIDTH 1
+#define       FCDI_EVENT_LEVEL_LBN 33
+#define       FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define          FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          FCDI_EVENT_LEVEL_FATAL 0x3
+#define       FCDI_EVENT_DATA_OFST 0
+#define       FCDI_EVENT_DATA_LEN 4
+#define        FCDI_EVENT_LINK_STATE_STATUS_OFST 0
+#define        FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define        FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define          FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define          FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define       FCDI_EVENT_DATA_LBN 0
+#define       FCDI_EVENT_DATA_WIDTH 32
+#define       FCDI_EVENT_SRC_LBN 36
+#define       FCDI_EVENT_SRC_WIDTH 8
+#define       FCDI_EVENT_EV_CODE_LBN 60
+#define       FCDI_EVENT_EV_CODE_WIDTH 4
+#define       FCDI_EVENT_CODE_LBN 44
+#define       FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define          FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define          FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define          FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define          FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define          FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: Tick event from PTP clock */
+#define          FCDI_EVENT_CODE_PTP_TICK 0x7
+/* enum: ECC error counters */
+#define          FCDI_EVENT_CODE_DDR_ECC_STATUS 0x8
+/* enum: Current status of PTP */
+#define          FCDI_EVENT_CODE_PTP_STATUS 0x9
+/* enum: Port id config to map MC-FC port idx */
+#define          FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define          FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define       FCDI_EVENT_REBOOT_SRC_LBN 36
+#define       FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define          FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define          FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LEN 4
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define       FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define       FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define       FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define       FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LEN 4
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define       FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define       FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define       FCDI_EVENT_LINK_STATE_DATA_LEN 4
+#define       FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define       FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define       FCDI_EVENT_PTP_STATE_OFST 0
+#define       FCDI_EVENT_PTP_STATE_LEN 4
+#define          FCDI_EVENT_PTP_UNDEFINED 0x0 /* enum */
+#define          FCDI_EVENT_PTP_SETUP_FAILED 0x1 /* enum */
+#define          FCDI_EVENT_PTP_OPERATIONAL 0x2 /* enum */
+#define       FCDI_EVENT_PTP_STATE_LBN 0
+#define       FCDI_EVENT_PTP_STATE_WIDTH 32
+#define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_LBN 36
+#define       FCDI_EVENT_DDR_ECC_STATUS_BANK_ID_WIDTH 8
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_OFST 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LEN 4
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_LBN 0
+#define       FCDI_EVENT_DDR_ECC_STATUS_STATUS_WIDTH 32
+/* Index of MC port being referred to */
+#define       FCDI_EVENT_PORT_CONFIG_SRC_LBN 36
+#define       FCDI_EVENT_PORT_CONFIG_SRC_WIDTH 8
+/* FC Port index that matches the MC port index in SRC */
+#define       FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LEN 4
+#define       FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
+#define       FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define       FCDI_EVENT_BOOT_RESULT_OFST 0
+#define       FCDI_EVENT_BOOT_RESULT_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define       FCDI_EVENT_BOOT_RESULT_LBN 0
+#define       FCDI_EVENT_BOOT_RESULT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
+ * to the MC. Note that this structure is overlaid over a normal FCDI event
+ * such that bits 32-63 containing event code, level, source etc. remain the
+ * same. In this case the data field of the header is defined to be the
+ * number of timestamps.
+ */
+#define    FCDI_EXTENDED_EVENT_PPS_LENMIN 16
+#define    FCDI_EXTENDED_EVENT_PPS_LENMAX 248
+#define    FCDI_EXTENDED_EVENT_PPS_LENMAX_MCDI2 1016
+#define    FCDI_EXTENDED_EVENT_PPS_LEN(num) (8+8*(num))
+#define    FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(len) (((len)-8)/8)
+/* Number of timestamps following */
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LEN 4
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define       FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LEN 4
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define       FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LEN 4
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define       FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LEN 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LO_OFST 8
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_HI_OFST 12
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MINNUM 1
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM 30
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_MAXNUM_MCDI2 126
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_LBN 64
+#define       FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_WIDTH 64
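+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It recovers the number of timestamp records from the overall extended PPS
+ * event length using the macros above.  The helper name is hypothetical.
+ */
+static inline unsigned int fcdi_pps_num_timestamps(unsigned int event_len)
+{
+	/* FCDI_EXTENDED_EVENT_PPS_LEN(num) is (8 + 8 * num), so invert it */
+	if (event_len < FCDI_EXTENDED_EVENT_PPS_LENMIN)
+		return 0;
+	return FCDI_EXTENDED_EVENT_PPS_TIMESTAMPS_NUM(event_len);
+}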
+
+/* MUM_EVENT structuredef */
+#define    MUM_EVENT_LEN 8
+#define       MUM_EVENT_CONT_LBN 32
+#define       MUM_EVENT_CONT_WIDTH 1
+#define       MUM_EVENT_LEVEL_LBN 33
+#define       MUM_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define          MUM_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define          MUM_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define          MUM_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define          MUM_EVENT_LEVEL_FATAL 0x3
+#define       MUM_EVENT_DATA_OFST 0
+#define       MUM_EVENT_DATA_LEN 4
+#define        MUM_EVENT_SENSOR_ID_OFST 0
+#define        MUM_EVENT_SENSOR_ID_LBN 0
+#define        MUM_EVENT_SENSOR_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define        MUM_EVENT_SENSOR_STATE_OFST 0
+#define        MUM_EVENT_SENSOR_STATE_LBN 8
+#define        MUM_EVENT_SENSOR_STATE_WIDTH 8
+#define        MUM_EVENT_PORT_PHY_READY_OFST 0
+#define        MUM_EVENT_PORT_PHY_READY_LBN 0
+#define        MUM_EVENT_PORT_PHY_READY_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_OFST 0
+#define        MUM_EVENT_PORT_PHY_LINK_UP_LBN 1
+#define        MUM_EVENT_PORT_PHY_LINK_UP_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOL_OFST 0
+#define        MUM_EVENT_PORT_PHY_TX_LOL_LBN 2
+#define        MUM_EVENT_PORT_PHY_TX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOL_OFST 0
+#define        MUM_EVENT_PORT_PHY_RX_LOL_LBN 3
+#define        MUM_EVENT_PORT_PHY_RX_LOL_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_LOS_OFST 0
+#define        MUM_EVENT_PORT_PHY_TX_LOS_LBN 4
+#define        MUM_EVENT_PORT_PHY_TX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_RX_LOS_OFST 0
+#define        MUM_EVENT_PORT_PHY_RX_LOS_LBN 5
+#define        MUM_EVENT_PORT_PHY_RX_LOS_WIDTH 1
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_OFST 0
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_LBN 6
+#define        MUM_EVENT_PORT_PHY_TX_FAULT_WIDTH 1
+#define       MUM_EVENT_DATA_LBN 0
+#define       MUM_EVENT_DATA_WIDTH 32
+#define       MUM_EVENT_SRC_LBN 36
+#define       MUM_EVENT_SRC_WIDTH 8
+#define       MUM_EVENT_EV_CODE_LBN 60
+#define       MUM_EVENT_EV_CODE_WIDTH 4
+#define       MUM_EVENT_CODE_LBN 44
+#define       MUM_EVENT_CODE_WIDTH 8
+/* enum: The MUM was rebooted. */
+#define          MUM_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define          MUM_EVENT_CODE_ASSERT 0x2
+/* enum: Sensor failure. */
+#define          MUM_EVENT_CODE_SENSOR 0x3
+/* enum: Link fault has been asserted, or has cleared. */
+#define          MUM_EVENT_CODE_QSFP_LASI_INTERRUPT 0x4
+#define       MUM_EVENT_SENSOR_DATA_OFST 0
+#define       MUM_EVENT_SENSOR_DATA_LEN 4
+#define       MUM_EVENT_SENSOR_DATA_LBN 0
+#define       MUM_EVENT_SENSOR_DATA_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_FLAGS_OFST 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_LEN 4
+#define       MUM_EVENT_PORT_PHY_FLAGS_LBN 0
+#define       MUM_EVENT_PORT_PHY_FLAGS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_OFST 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LEN 4
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_LBN 0
+#define       MUM_EVENT_PORT_PHY_COPPER_LEN_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_CAPS_OFST 0
+#define       MUM_EVENT_PORT_PHY_CAPS_LEN 4
+#define       MUM_EVENT_PORT_PHY_CAPS_LBN 0
+#define       MUM_EVENT_PORT_PHY_CAPS_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_TECH_OFST 0
+#define       MUM_EVENT_PORT_PHY_TECH_LEN 4
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_UNKNOWN 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_OPTICAL 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_PASSIVE_EQUALIZED 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LIMITING 0x4 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_COPPER_ACTIVE_LINEAR 0x5 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_BASE_T 0x6 /* enum */
+#define          MUM_EVENT_PORT_PHY_STATE_QSFP_MODULE_TECH_LOOPBACK_PASSIVE 0x7 /* enum */
+#define       MUM_EVENT_PORT_PHY_TECH_LBN 0
+#define       MUM_EVENT_PORT_PHY_TECH_WIDTH 32
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_LBN 36
+#define       MUM_EVENT_PORT_PHY_SRC_DATA_ID_WIDTH 4
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_FLAGS 0x0 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_COPPER_LEN 0x1 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_CAPS 0x2 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_TECH 0x3 /* enum */
+#define          MUM_EVENT_PORT_PHY_SRC_DATA_ID_MAX 0x4 /* enum */
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_LBN 40
+#define       MUM_EVENT_PORT_PHY_SRC_PORT_NO_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_READ32
+ * Read multiple 32-bit words from MC memory. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_READ32 0x1
+#undef MC_CMD_0x1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_READ32_IN msgrequest */
+#define    MC_CMD_READ32_IN_LEN 8
+#define       MC_CMD_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_READ32_IN_ADDR_LEN 4
+#define       MC_CMD_READ32_IN_NUMWORDS_OFST 4
+#define       MC_CMD_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_READ32_OUT msgresponse */
+#define    MC_CMD_READ32_OUT_LENMIN 4
+#define    MC_CMD_READ32_OUT_LENMAX 252
+#define    MC_CMD_READ32_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_READ32_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+#define       MC_CMD_READ32_OUT_BUFFER_OFST 0
+#define       MC_CMD_READ32_OUT_BUFFER_LEN 4
+#define       MC_CMD_READ32_OUT_BUFFER_MINNUM 1
+#define       MC_CMD_READ32_OUT_BUFFER_MAXNUM 63
+#define       MC_CMD_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
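+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It fills in an MC_CMD_READ32 request SDU using the offsets above.  The
+ * helper names and the u8/u32 types (from linux/types.h) are assumptions.
+ */
+static inline void mcdi_put_dword(u8 *buf, unsigned int ofst, u32 val)
+{
+	/* MCDI payload dwords are little-endian on the wire */
+	buf[ofst + 0] = val & 0xff;
+	buf[ofst + 1] = (val >> 8) & 0xff;
+	buf[ofst + 2] = (val >> 16) & 0xff;
+	buf[ofst + 3] = (val >> 24) & 0xff;
+}
+
+static inline void mcdi_fill_read32_req(u8 *buf, u32 addr, u32 numwords)
+{
+	/* The caller provides a buffer of at least MC_CMD_READ32_IN_LEN bytes */
+	mcdi_put_dword(buf, MC_CMD_READ32_IN_ADDR_OFST, addr);
+	mcdi_put_dword(buf, MC_CMD_READ32_IN_NUMWORDS_OFST, numwords);
+}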
+
+
+/***********************************/
+/* MC_CMD_WRITE32
+ * Write multiple 32-bit words to MC memory.
+ */
+#define MC_CMD_WRITE32 0x2
+#undef MC_CMD_0x2_PRIVILEGE_CTG
+
+#define MC_CMD_0x2_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_WRITE32_IN msgrequest */
+#define    MC_CMD_WRITE32_IN_LENMIN 8
+#define    MC_CMD_WRITE32_IN_LENMAX 252
+#define    MC_CMD_WRITE32_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_WRITE32_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_WRITE32_IN_BUFFER_NUM(len) (((len)-4)/4)
+#define       MC_CMD_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_WRITE32_IN_ADDR_LEN 4
+#define       MC_CMD_WRITE32_IN_BUFFER_OFST 4
+#define       MC_CMD_WRITE32_IN_BUFFER_LEN 4
+#define       MC_CMD_WRITE32_IN_BUFFER_MINNUM 1
+#define       MC_CMD_WRITE32_IN_BUFFER_MAXNUM 62
+#define       MC_CMD_WRITE32_IN_BUFFER_MAXNUM_MCDI2 254
+
+/* MC_CMD_WRITE32_OUT msgresponse */
+#define    MC_CMD_WRITE32_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_COPYCODE
+ * Copy MC code between two locations and jump. Note - this command really
+ * belongs to INSECURE category but is required by shmboot. The command handler
+ * has additional checks to reject insecure calls.
+ */
+#define MC_CMD_COPYCODE 0x3
+#undef MC_CMD_0x3_PRIVILEGE_CTG
+
+#define MC_CMD_0x3_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_COPYCODE_IN msgrequest */
+#define    MC_CMD_COPYCODE_IN_LEN 16
+/* Source address
+ *
+ * The main image should be entered via a copy of a single word from and to a
+ * magic address, which controls various aspects of the boot. The magic address
+ * is a bitfield, with each bit as documented below.
+ */
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+#define       MC_CMD_COPYCODE_IN_SRC_ADDR_LEN 4
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT (see below) */
+#define          MC_CMD_COPYCODE_HUNT_NO_MAGIC_ADDR 0x10000
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT and
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED (see below)
+ */
+#define          MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Deprecated; equivalent to setting BOOT_MAGIC_PRESENT,
+ * BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED and BOOT_MAGIC_IGNORE_CONFIG (see
+ * below)
+ */
+#define          MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN 17
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN 2
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN 3
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_LBN 4
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_SKIP_BOOT_ICORE_SYNC_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_LBN 5
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_FORCE_STANDALONE_WIDTH 1
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_OFST 0
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_LBN 6
+#define        MC_CMD_COPYCODE_IN_BOOT_MAGIC_DISABLE_XIP_WIDTH 1
+/* Destination address */
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
+#define       MC_CMD_COPYCODE_IN_DEST_ADDR_LEN 4
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+#define       MC_CMD_COPYCODE_IN_NUMWORDS_LEN 4
+/* Address of where to jump after copy. */
+#define       MC_CMD_COPYCODE_IN_JUMP_OFST 12
+#define       MC_CMD_COPYCODE_IN_JUMP_LEN 4
+/* enum: Control should return to the caller rather than jumping */
+#define          MC_CMD_COPYCODE_JUMP_NONE 0x1
+
+/* MC_CMD_COPYCODE_OUT msgresponse */
+#define    MC_CMD_COPYCODE_OUT_LEN 0
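+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It composes the magic SRC_ADDR bitfield for MC_CMD_COPYCODE from the
+ * BOOT_MAGIC_* flags documented above.  The helper name and the u32/bool
+ * types (from linux/types.h) are assumptions.
+ */
+static inline u32 mc_copycode_boot_magic(bool satellite_cpus_not_loaded,
+					 bool ignore_config)
+{
+	u32 src_addr = 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_PRESENT_LBN;
+
+	if (satellite_cpus_not_loaded)
+		src_addr |= 1U <<
+			MC_CMD_COPYCODE_IN_BOOT_MAGIC_SATELLITE_CPUS_NOT_LOADED_LBN;
+	if (ignore_config)
+		src_addr |= 1U << MC_CMD_COPYCODE_IN_BOOT_MAGIC_IGNORE_CONFIG_LBN;
+	return src_addr;
+}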
+
+
+/***********************************/
+/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
+ */
+#define MC_CMD_SET_FUNC 0x4
+#undef MC_CMD_0x4_PRIVILEGE_CTG
+
+#define MC_CMD_0x4_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_FUNC_IN msgrequest */
+#define    MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
+#define       MC_CMD_SET_FUNC_IN_FUNC_OFST 0
+#define       MC_CMD_SET_FUNC_IN_FUNC_LEN 4
+
+/* MC_CMD_SET_FUNC_OUT msgresponse */
+#define    MC_CMD_SET_FUNC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
+ */
+#define MC_CMD_GET_BOOT_STATUS 0x5
+#undef MC_CMD_0x5_PRIVILEGE_CTG
+
+#define MC_CMD_0x5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOOT_STATUS_IN msgrequest */
+#define    MC_CMD_GET_BOOT_STATUS_IN_LEN 0
+
+/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
+#define    MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+#define       MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_LEN 4
+/* enum: indicates that the MC wasn't flash booted */
+#define          MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
+#define       MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_OFST 4
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_OFST 4
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_LBN 1
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_PRIMARY_WIDTH 1
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_OFST 4
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_LBN 2
+#define        MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_BACKUP_WIDTH 1
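+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It tests the WATCHDOG flag in the FLAGS word returned by
+ * MC_CMD_GET_BOOT_STATUS.  The helper name and the u32/bool types are
+ * assumptions.
+ */
+static inline bool mc_boot_status_watchdog_fired(u32 flags)
+{
+	return (flags >> MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN) &
+	       ((1U << MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH) - 1);
+}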
+
+
+/***********************************/
+/* MC_CMD_GET_ASSERTS
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
+ */
+#define MC_CMD_GET_ASSERTS 0x6
+#undef MC_CMD_0x6_PRIVILEGE_CTG
+
+#define MC_CMD_0x6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ASSERTS_IN msgrequest */
+#define    MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
+#define       MC_CMD_GET_ASSERTS_IN_CLEAR_LEN 4
+
+/* MC_CMD_GET_ASSERTS_OUT msgresponse */
+#define    MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+#define          MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define          MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define          MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define          MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define          MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
+#define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+#define          MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057
+/* Failing thread address */
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_RESERVED_LEN 4
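+
+/* Editor's note: illustrative sketch only, not part of the original header.
+ * It interprets the GLOBAL_FLAGS word of MC_CMD_GET_ASSERTS: only this word
+ * is guaranteed to be present, and anything other than NO_FAILS indicates
+ * that the MC has asserted or been reset.  The helper name and u32/bool
+ * types are assumptions.
+ */
+static inline bool mc_asserts_present(u32 global_flags)
+{
+	return global_flags != MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS;
+}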
+
+/* MC_CMD_GET_ASSERTS_OUT_V2 msgresponse: Extended response for MicroBlaze CPUs
+ * found on Riverhead designs
+ */
+#define    MC_CMD_GET_ASSERTS_OUT_V2_LEN 240
+/* Assertion status flag. */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (Huntington and later) */
+/*               MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_OFST 8
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/*               MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_V2_THREAD_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V2_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V2_SF_REGS_OFFS_NUM 26
+
+/* MC_CMD_GET_ASSERTS_OUT_V3 msgresponse: Extended response with asserted
+ * firmware version information
+ */
+#define    MC_CMD_GET_ASSERTS_OUT_V3_LEN 360
+/* Assertion status flag. */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_OFST 0
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GLOBAL_FLAGS_LEN 4
+/* enum: No assertions have failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 */
+/* enum: A system-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 */
+/* enum: A thread-level assertion has failed. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 */
+/* enum: The system was reset by the watchdog. */
+/*               MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 */
+/* enum: An illegal address trap stopped the system (Huntington and later) */
+/*               MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5 */
+/* Failing PC value */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_OFST 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SAVED_PC_OFFS_LEN 4
+/* Saved GP regs */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_OFST 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_GP_REGS_OFFS_NUM 31
+/* enum: A magic value hinting that the value in this register at the time of
+ * the failure has likely been lost.
+ */
+/*               MC_CMD_GET_ASSERTS_REG_NO_DATA 0xda7a1057 */
+/* Failing thread address */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_OFST 132
+#define       MC_CMD_GET_ASSERTS_OUT_V3_THREAD_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V3_RESERVED_LEN 4
+/* Saved Special Function Registers */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_OFST 136
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_LEN 4
+#define       MC_CMD_GET_ASSERTS_OUT_V3_SF_REGS_OFFS_NUM 26
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_OFST 240
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_ID_LEN 20
+/* MC firmware build date (as Unix timestamp) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_OFST 260
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LEN 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_LO_OFST 260
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_TIMESTAMP_HI_OFST 264
+/* MC firmware version number */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_OFST 268
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LEN 8
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_LO_OFST 268
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_VERSION_HI_OFST 272
+/* MC firmware security level */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_OFST 276
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_SECURITY_LEVEL_LEN 4
+/* MC firmware extra version info (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_OFST 280
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_EXTRA_INFO_LEN 16
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_OFST 296
+#define       MC_CMD_GET_ASSERTS_OUT_V3_MC_FW_BUILD_NAME_LEN 64
+
+
+/***********************************/
+/* MC_CMD_LOG_CTRL
+ * Configure the output stream for log events such as link state changes,
+ * sensor notifications and MCDI completions
+ */
+#define MC_CMD_LOG_CTRL 0x7
+#undef MC_CMD_0x7_PRIVILEGE_CTG
+
+#define MC_CMD_0x7_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LOG_CTRL_IN msgrequest */
+#define    MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_LEN 4
+/* enum: UART. */
+#define          MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define          MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
+/* Legacy argument. Must be zero. */
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
+#define       MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_LEN 4
+
+/* MC_CMD_LOG_CTRL_OUT msgresponse */
+#define    MC_CMD_LOG_CTRL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VERSION
+ * Get version information about adapter components.
+ */
+#define MC_CMD_GET_VERSION 0x8
+#undef MC_CMD_0x8_PRIVILEGE_CTG
+
+#define MC_CMD_0x8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VERSION_IN msgrequest */
+#define    MC_CMD_GET_VERSION_IN_LEN 0
+
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define    MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+#define       MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_LEN 4
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
+#define    MC_CMD_GET_VERSION_V0_OUT_LEN 4
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
+#define       MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4
+/* enum: Reserved version number to indicate "any" version. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
+/* enum: Bootrom version value for Medford2. */
+#define          MC_CMD_GET_VERSION_OUT_FIRMWARE_MEDFORD2_BOOTROM 0xb0070002
+
+/* MC_CMD_GET_VERSION_OUT msgresponse */
+#define    MC_CMD_GET_VERSION_OUT_LEN 32
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define       MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
+#define       MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
+#define       MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
+#define       MC_CMD_GET_VERSION_OUT_VERSION_LEN 8
+#define       MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
+#define       MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
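The 64-bit VERSION field is carried as two little-endian dwords at the
_LO_OFST/_HI_OFST offsets, the pattern used by every 8-byte field in this
header. A sketch of recombining them, reusing the illustrative mcdi_dword()
helper from the earlier note:

#include <stdint.h>

static uint64_t mcdi_qword(const uint8_t *resp, unsigned int lo_ofst,
                           unsigned int hi_ofst)
{
        return (uint64_t)mcdi_dword(resp, hi_ofst) << 32 |
               mcdi_dword(resp, lo_ofst);
}

/* e.g. mcdi_qword(resp, MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST,
 *                 MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST)
 */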
+
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define    MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_EXT_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define       MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define       MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define       MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define       MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define       MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define       MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define       MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
+
+/* MC_CMD_GET_VERSION_V2_OUT msgresponse: Extended response providing version
+ * information for all adapter components. For Riverhead based designs, base MC
+ * firmware version fields refer to NMC firmware, while CMC firmware data is in
+ * dedicated CMC fields. Flags indicate which data is present in the response
+ * (depending on which components exist on a particular adapter)
+ */
+#define    MC_CMD_GET_VERSION_V2_OUT_LEN 304
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/*            MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define       MC_CMD_GET_VERSION_V2_OUT_PCOL_OFST 4
+#define       MC_CMD_GET_VERSION_V2_OUT_PCOL_LEN 4
+/* 128bit mask of functions supported by the current firmware */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_OFST 8
+#define       MC_CMD_GET_VERSION_V2_OUT_SUPPORTED_FUNCS_LEN 16
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_OFST 24
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_LO_OFST 24
+#define       MC_CMD_GET_VERSION_V2_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define       MC_CMD_GET_VERSION_V2_OUT_EXTRA_OFST 32
+#define       MC_CMD_GET_VERSION_V2_OUT_EXTRA_LEN 16
+/* Flags indicating which extended fields are valid */
+#define       MC_CMD_GET_VERSION_V2_OUT_FLAGS_OFST 48
+#define       MC_CMD_GET_VERSION_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_LBN 0
+#define        MC_CMD_GET_VERSION_V2_OUT_MCFW_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_LBN 1
+#define        MC_CMD_GET_VERSION_V2_OUT_SUCFW_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_LBN 2
+#define        MC_CMD_GET_VERSION_V2_OUT_CMC_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_LBN 3
+#define        MC_CMD_GET_VERSION_V2_OUT_FPGA_EXT_INFO_PRESENT_WIDTH 1
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_OFST 48
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN 4
+#define        MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_WIDTH 1
+/* MC firmware unique build ID (as binary SHA-1 value) */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_OFST 52
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_ID_LEN 20
+/* MC firmware security level */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_OFST 72
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_SECURITY_LEVEL_LEN 4
+/* MC firmware build name (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_OFST 76
+#define       MC_CMD_GET_VERSION_V2_OUT_MCFW_BUILD_NAME_LEN 64
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_OFST 140
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_VERSION_NUM 4
+/* SUC firmware build date (as 64-bit Unix timestamp) */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_OFST 156
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_LO_OFST 156
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_BUILD_DATE_HI_OFST 160
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_OFST 164
+#define       MC_CMD_GET_VERSION_V2_OUT_SUCFW_CHIP_ID_LEN 4
+/* The CMC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_OFST 168
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_VERSION_NUM 4
+/* CMC firmware build date (as 64-bit Unix timestamp) */
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_OFST 184
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LEN 8
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_LO_OFST 184
+#define       MC_CMD_GET_VERSION_V2_OUT_CMCFW_BUILD_DATE_HI_OFST 188
+/* FPGA version as three numbers. On Riverhead based systems this field uses
+ * the same encoding as hardware version ID registers (MC_FPGA_BUILD_HWRD_REG):
+ * FPGA_VERSION[0]: x => Image H{x}
+ * FPGA_VERSION[1]: Revision letter (0 => A, 1 => B, ...)
+ * FPGA_VERSION[2]: Sub-revision number
+ */
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_OFST 192
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_LEN 4
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_VERSION_NUM 3
+/* Extra FPGA revision information (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_OFST 204
+#define       MC_CMD_GET_VERSION_V2_OUT_FPGA_EXTRA_LEN 16
+/* Board name / adapter model (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_OFST 220
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN 16
+/* Board revision number */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_OFST 236
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN 4
+/* Board serial number (as null-terminated US-ASCII string) */
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_OFST 240
+#define       MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN 64
+
+
+/***********************************/
+/* MC_CMD_PTP
+ * Perform PTP operation
+ */
+#define MC_CMD_PTP 0xb
+#undef MC_CMD_0xb_PRIVILEGE_CTG
+
+#define MC_CMD_0xb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PTP_IN msgrequest */
+#define    MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
+#define       MC_CMD_PTP_IN_OP_OFST 0
+#define       MC_CMD_PTP_IN_OP_LEN 1
+/* enum: Enable PTP packet timestamping operation. */
+#define          MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define          MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. This operation is used on Siena and Huntington.
+ * From Medford onwards it is not supported: on those platforms PTP transmit
+ * timestamping is done using the fast path.
+ */
+#define          MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define          MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. Note that the clock frequency returned (in
+ * Hz) is rounded to the nearest MHz (e.g. 666000000 for 666666666).
+ */
+#define          MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define          MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define          MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. Siena PTP adapters only. */
+#define          MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. Siena PTP adapters only. */
+#define          MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define          MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define          MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register. Siena PTP adapters only. */
+#define          MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register. Siena PTP adapters only. */
+#define          MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define          MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Change the frequency correction applied to the NIC clock */
+#define          MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets.
+ * Deprecated for Huntington onwards.
+ */
+#define          MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets. Deprecated for
+ * Huntington onwards.
+ */
+#define          MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets. Deprecated
+ * for Huntington onwards.
+ */
+#define          MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source. Required for snapper tests on Huntington and
+ * Medford. Not implemented for Siena or Medford2.
+ */
+#define          MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. Not implemented. */
+#define          MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define          MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Get the time format used by this NIC for PTP operations */
+#define          MC_CMD_PTP_OP_GET_TIME_FORMAT 0x16
+/* enum: Get the clock attributes. NOTE: this is an extended version of
+ * MC_CMD_PTP_OP_GET_TIME_FORMAT and shares its opcode.
+ */
+#define          MC_CMD_PTP_OP_GET_ATTRIBUTES 0x16
+/* enum: Get corrections that should be applied to the various different
+ * timestamps
+ */
+#define          MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS 0x17
+/* enum: Subscribe to receive periodic time events indicating the current NIC
+ * time
+ */
+#define          MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE 0x18
+/* enum: Unsubscribe to stop receiving time events */
+#define          MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE 0x19
+/* enum: PPS based manufacturing tests. Requires PPS output to be looped to PPS
+ * input on the same NIC. Siena PTP adapters only.
+ */
+#define          MC_CMD_PTP_OP_MANFTEST_PPS 0x1a
+/* enum: Set the PTP sync status. Status is used by firmware to report to event
+ * subscribers.
+ */
+#define          MC_CMD_PTP_OP_SET_SYNC_STATUS 0x1b
+/* enum: Values above this are reserved for future use. */
+#define          MC_CMD_PTP_OP_MAX 0x1c
+
+/* MC_CMD_PTP_IN_ENABLE msgrequest */
+#define    MC_CMD_PTP_IN_ENABLE_LEN 16
+#define       MC_CMD_PTP_IN_CMD_OFST 0
+#define       MC_CMD_PTP_IN_CMD_LEN 4
+#define       MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+#define       MC_CMD_PTP_IN_PERIPH_ID_LEN 4
+/* Not used. Events are always sent to function relative queue 0. */
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_ENABLE_QUEUE_LEN 4
+/* PTP timestamping mode. Not used from Huntington onwards. */
+#define       MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
+#define       MC_CMD_PTP_IN_ENABLE_MODE_LEN 4
+/* enum: PTP, version 1 */
+#define          MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define          MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define          MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define          MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define          MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define          MC_CMD_PTP_MODE_FCOE 0x5
+
+/* MC_CMD_PTP_IN_DISABLE msgrequest */
+#define    MC_CMD_PTP_IN_DISABLE_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TRANSMIT msgrequest */
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMIN 13
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX 252
+#define    MC_CMD_PTP_IN_TRANSMIT_LENMAX_MCDI2 1020
+#define    MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
+#define    MC_CMD_PTP_IN_TRANSMIT_PACKET_NUM(len) (((len)-12)/1)
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Transmit packet length */
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+#define       MC_CMD_PTP_IN_TRANSMIT_LENGTH_LEN 4
+/* Transmit packet data */
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM 240
+#define       MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2 1008
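TRANSMIT is a variable-length request: LEN(num) gives the total request size
for num packet bytes and PACKET_NUM(len) inverts it; MAXNUM 240 reflects the
252-byte MCDI v1 request limit (LENMAX) minus the 12-byte fixed part. A small
sizing sketch (names are illustrative, no MCDI transport is shown):

#include <stdbool.h>
#include <stddef.h>

static bool ptp_transmit_req_len(size_t pkt_len, bool mcdi2, size_t *req_len)
{
        size_t max = mcdi2 ? MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM_MCDI2
                           : MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM;

        if (pkt_len < MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM || pkt_len > max)
                return false;

        *req_len = MC_CMD_PTP_IN_TRANSMIT_LEN(pkt_len);     /* 12 + pkt_len */
        return true;
}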
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_READ_NIC_TIME_V2 msgrequest */
+#define    MC_CMD_PTP_IN_READ_NIC_TIME_V2_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_STATUS_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_ADJUST msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+#define          MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+#define          MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_MINOR_LEN 4
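A hedged sketch of encoding the FREQ field from a parts-per-billion value,
under the assumption that "40 bit fixed point ns" means a fractional frequency
offset with MC_CMD_PTP_IN_ADJUST_BITS (or _BITS_FP44) fractional bits, i.e.
roughly (ppb * 2^bits) / 10^9. Treat the conversion as an assumption to verify
against the MCDI spec and the sfc PTP code, not as the firmware's definition:

#include <stdbool.h>
#include <stdint.h>

static int64_t ptp_encode_freq_adj(int64_t ppb, bool fp44)
{
        unsigned int bits = fp44 ? MC_CMD_PTP_IN_ADJUST_BITS_FP44
                                 : MC_CMD_PTP_IN_ADJUST_BITS;

        /* Assumes |ppb| stays modest (a few hundred thousand at most),
         * so the multiplication cannot overflow a signed 64-bit value.
         */
        return (ppb * ((int64_t)1 << bits)) / 1000000000LL;
}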
+
+/* MC_CMD_PTP_IN_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_ADJUST_V2_LEN 28
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_ADJUST_V2_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/*               MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+/* enum: Number of fractional bits in frequency adjustment when FP44_FREQ_ADJ
+ * is indicated in the MC_CMD_PTP_OUT_GET_ATTRIBUTES command CAPABILITIES
+ * field.
+ */
+/*               MC_CMD_PTP_IN_ADJUST_BITS_FP44 0x2c */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_OFST 16
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_OFST 20
+#define       MC_CMD_PTP_IN_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_OFST 24
+#define       MC_CMD_PTP_IN_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
+#define    MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of time readings to capture */
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_LEN 4
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
+#define       MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_HI_OFST 16
+
+/* MC_CMD_PTP_IN_MANFTEST_BASIC msgrequest */
+#define    MC_CMD_PTP_IN_MANFTEST_BASIC_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_MANFTEST_PACKET msgrequest */
+#define    MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Enable or disable packet testing */
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_RESET_STATS msgrequest: Reset PTP statistics */
+#define    MC_CMD_PTP_IN_RESET_STATS_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_DEBUG msgrequest */
+#define    MC_CMD_PTP_IN_DEBUG_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Debug operations */
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+#define       MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define    MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAREAD_ADDR_LEN 4
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+#define       MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_LEN 4
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define    MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define    MC_CMD_PTP_IN_FPGAWRITE_LENMAX_MCDI2 1020
+#define    MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+#define    MC_CMD_PTP_IN_FPGAWRITE_BUFFER_NUM(len) (((len)-12)/1)
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define       MC_CMD_PTP_IN_FPGAWRITE_ADDR_LEN 4
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+#define       MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_MINOR_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2 msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Time adjustment in seconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_SECONDS_LEN 4
+/* Time adjustment major value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_LEN 4
+/* Time adjustment in nanoseconds */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_NANOSECONDS_LEN 4
+/* Time adjustment minor value */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_OFST 12
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MINOR_LEN 4
+/* Upper 32bits of major time offset adjustment */
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_OFST 16
+#define       MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define    MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define       MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP/MC_CMD_PTP_IN_ADJUST/FREQ */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define    MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_LEN 4
+/* Set of VLAN tags to filter against */
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define       MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define    MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_LEN 4
+/* UUID to filter against */
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define    MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_LEN 4
+/* Domain number to filter against */
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+#define       MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_LEN 4
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define    MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Set the clock source. */
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+#define       MC_CMD_PTP_IN_SET_CLK_SRC_CLK_LEN 4
+/* enum: Internal. */
+#define          MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define          MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest: Reset value of Timer Reg. */
+#define    MC_CMD_PTP_IN_RST_CLK_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define    MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/* Enable or disable */
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+#define       MC_CMD_PTP_IN_PPS_ENABLE_OP_LEN 4
+/* enum: Enable */
+#define          MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define          MC_CMD_PTP_DISABLE_PPS 0x1
+/* Not used. Events are always sent to function relative queue 0. */
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+#define       MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_LEN 4
+
+/* MC_CMD_PTP_IN_GET_TIME_FORMAT msgrequest */
+#define    MC_CMD_PTP_IN_GET_TIME_FORMAT_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_ATTRIBUTES msgrequest */
+#define    MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS msgrequest */
+#define    MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN 8
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+
+/* MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE msgrequest */
+#define    MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Original field containing queue ID. Now extended to include flags. */
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_LEN 4
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN 0
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_WIDTH 16
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_OFST 8
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN 31
+#define        MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_WIDTH 1
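The QUEUE word packs the event queue ID into bits [15:0] and the
REPORT_SYNC_STATUS flag into bit 31, per the _LBN/_WIDTH defines above. A
small composition sketch (function name invented):

#include <stdbool.h>
#include <stdint.h>

static uint32_t ptp_subscribe_queue_word(uint16_t evq, bool want_sync_status)
{
        uint32_t v = (uint32_t)evq << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE_ID_LBN;

        if (want_sync_status)
                v |= 1u << MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_REPORT_SYNC_STATUS_LBN;
        return v;
}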
+
+/* MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE msgrequest */
+#define    MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN 16
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* Unsubscribe options */
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_OFST 8
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL_LEN 4
+/* enum: Unsubscribe a single queue */
+#define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE 0x0
+/* enum: Unsubscribe all queues */
+#define          MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_ALL 0x1
+/* Event queue ID */
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_OFST 12
+#define       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE_LEN 4
+
+/* MC_CMD_PTP_IN_MANFTEST_PPS msgrequest */
+#define    MC_CMD_PTP_IN_MANFTEST_PPS_LEN 12
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* 1 to enable PPS test mode, 0 to disable and return result. */
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_OFST 8
+#define       MC_CMD_PTP_IN_MANFTEST_PPS_TEST_ENABLE_LEN 4
+
+/* MC_CMD_PTP_IN_SET_SYNC_STATUS msgrequest */
+#define    MC_CMD_PTP_IN_SET_SYNC_STATUS_LEN 24
+/*            MC_CMD_PTP_IN_CMD_OFST 0 */
+/*            MC_CMD_PTP_IN_CMD_LEN 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/*            MC_CMD_PTP_IN_PERIPH_ID_LEN 4 */
+/* NIC - Host System Clock Synchronization status */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_OFST 8
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_STATUS_LEN 4
+/* enum: Host System clock and NIC clock are not in sync */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_NOT_IN_SYNC 0x0
+/* enum: Host System clock and NIC clock are synchronized */
+#define          MC_CMD_PTP_IN_SET_SYNC_STATUS_IN_SYNC 0x1
+/* If synchronized, number of seconds until clocks should be considered to be
+ * no longer in sync.
+ */
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_OFST 12
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_TIMEOUT_LEN 4
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_OFST 16
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED0_LEN 4
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_OFST 20
+#define       MC_CMD_PTP_IN_SET_SYNC_STATUS_RESERVED1_LEN 4
+
+/* MC_CMD_PTP_OUT msgresponse */
+#define    MC_CMD_PTP_OUT_LEN 0
+
+/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
+#define    MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_TRANSMIT_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_TRANSMIT_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE msgresponse */
+#define    MC_CMD_PTP_OUT_TIME_EVENT_SUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE msgresponse */
+#define    MC_CMD_PTP_OUT_TIME_EVENT_UNSUBSCRIBE_LEN 0
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_MINOR_LEN 4
+
+/* MC_CMD_PTP_OUT_READ_NIC_TIME_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_READ_NIC_TIME_V2_LEN 12
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_OFST 0
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_OFST 4
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MINOR_LEN 4
+/* Upper 32bits of major timestamp value */
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_OFST 8
+#define       MC_CMD_PTP_OUT_READ_NIC_TIME_V2_MAJOR_HI_LEN 4
+
+/* MC_CMD_PTP_OUT_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+#define       MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_LEN 4
+/* Number of packets transmitted and timestamped */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TX_LEN 4
+/* Number of packets received and timestamped */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+#define       MC_CMD_PTP_OUT_STATUS_STATS_RX_LEN 4
+/* Number of packets timestamped by the FPGA */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+#define       MC_CMD_PTP_OUT_STATUS_STATS_TS_LEN 4
+/* Number of packets filter matched */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+#define       MC_CMD_PTP_OUT_STATUS_STATS_FM_LEN 4
+/* Number of packets not filter matched */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+#define       MC_CMD_PTP_OUT_STATUS_STATS_NFM_LEN 4
+/* Number of PPS overflows (noise on input?) */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_LEN 4
+/* Number of PPS bad periods */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_LEN 4
+/* Minimum period of PPS pulse in nanoseconds */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_LEN 4
+/* Maximum period of PPS pulse in nanoseconds */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_LEN 4
+/* Last period of PPS pulse in nanoseconds */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_LEN 4
+/* Mean period of PPS pulse in nanoseconds */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_LEN 4
+/* Minimum offset of PPS pulse in nanoseconds (signed) */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_LEN 4
+/* Maximum offset of PPS pulse in nanoseconds (signed) */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_LEN 4
+/* Last offset of PPS pulse in nanoseconds (signed) */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_LEN 4
+/* Mean offset of PPS pulse in nanoseconds (signed) */
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
+#define       MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_LEN 4
+
+/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX_MCDI2 1020
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+#define    MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_NUM(len) (((len)-0)/20)
+/* A set of host and NIC times */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM_MCDI2 51
+/* Host time immediately before NIC's hardware clock read */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_LEN 4
+/* Value of seconds timestamp */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_LEN 4
+/* Timestamp major value */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_OFST 4
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MAJOR_LEN 4
+/* Value of nanoseconds timestamp */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_LEN 4
+/* Timestamp minor value */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_OFST 8
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_MINOR_LEN 4
+/* Host time immediately after NIC's hardware clock read */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_LEN 4
+/* Number of nanoseconds waited after reading NIC's hardware clock */
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
+#define       MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_LEN 4
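Each TIMESET entry is TIMESET_LEN bytes, so entry i starts at
TIMESET_OFST + i * TIMESET_LEN. The "corrected sync window" for an entry, as
described under MC_CMD_PTP_OUT_GET_ATTRIBUTES/SYNC_WINDOW_MIN further down, is
(HOSTEND - HOSTSTART) - WAITNS. A sketch reusing the illustrative mcdi_dword()
helper, assuming the host publishes its timestamps in nanoseconds:

#include <stdint.h>

static uint32_t ptp_corrected_sync_window(const uint8_t *resp, unsigned int i)
{
        unsigned int base = MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST +
                            i * MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;

        return (mcdi_dword(resp, base + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST) -
                mcdi_dword(resp, base + MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST)) -
               mcdi_dword(resp, base + MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST);
}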
+
+/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
+#define    MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_LEN 4
+/* enum: Successful test */
+#define          MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define          MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define          MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define          MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define          MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define          MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define          MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define          MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define          MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define          MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* enum: Insufficient PPS events to perform checks */
+#define          MC_CMD_PTP_MANF_PPS_ENOUGH 0xa
+/* enum: PPS time event period not sufficiently close to 1s. */
+#define          MC_CMD_PTP_MANF_PPS_PERIOD 0xb
+/* enum: PPS time event nS reading not sufficiently close to zero. */
+#define          MC_CMD_PTP_MANF_PPS_NS 0xc
+/* enum: PTP peripheral registers incorrect */
+#define          MC_CMD_PTP_MANF_REGISTERS 0xd
+/* enum: Failed to read time from PTP peripheral */
+#define          MC_CMD_PTP_MANF_CLOCK_READ 0xe
+/* Presence of external oscillator */
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
+#define    MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_LEN 4
+/* Number of packets received by FPGA */
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_LEN 4
+/* Number of packets received by Siena filters */
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+#define       MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_LEN 4
+
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define    MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define    MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define    MC_CMD_PTP_OUT_FPGAREAD_LENMAX_MCDI2 1020
+#define    MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define    MC_CMD_PTP_OUT_FPGAREAD_BUFFER_NUM(len) (((len)-0)/1)
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+#define       MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM_MCDI2 1020
+
+/* MC_CMD_PTP_OUT_GET_TIME_FORMAT msgresponse */
+#define    MC_CMD_PTP_OUT_GET_TIME_FORMAT_LEN 4
+/* Time format required/used for this NIC. Applies to all PTP MCDI operations
+ * that pass times between the host and firmware. If this operation is not
+ * supported (older firmware) a format of seconds and nanoseconds should be
+ * assumed. Note this enum is deprecated. Do not add to it; use the TIME_FORMAT
+ * field in MC_CMD_PTP_OUT_GET_ATTRIBUTES instead.
+ */
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIME_FORMAT_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define          MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION 0x2
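The three formats above imply three different conversions from a
(MAJOR, MINOR) pair to nanoseconds. A sketch grounded only in the enum
comments above (function and parameter names are invented):

#include <stdint.h>

static uint64_t ptp_to_ns(uint32_t format, uint64_t major, uint32_t minor)
{
        switch (format) {
        case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_NANOSECONDS:
                return major * 1000000000ULL + minor;
        case MC_CMD_PTP_OUT_GET_TIME_FORMAT_16SECONDS_8NANOSECONDS:
                return major * 16ULL * 1000000000ULL + minor * 8ULL;
        case MC_CMD_PTP_OUT_GET_TIME_FORMAT_SECONDS_27FRACTION:
                return major * 1000000000ULL +
                       ((uint64_t)minor * 1000000000ULL >> 27);
        default:
                return 0;               /* unknown format */
        }
}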
+
+/* MC_CMD_PTP_OUT_GET_ATTRIBUTES msgresponse */
+#define    MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN 24
+/* Time format required/used for this NIC. Applies to all PTP MCDI operations
+ * that pass times between the host and firmware. If this operation is not
+ * supported (older firmware) a format of seconds and nanoseconds should be
+ * assumed.
+ */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT_LEN 4
+/* enum: Times are in seconds and nanoseconds */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS 0x0
+/* enum: Major register has units of 16 seconds per tick, minor 8 ns per tick */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_16SECONDS_8NANOSECONDS 0x1
+/* enum: Major register has units of seconds, minor 2^-27s per tick */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION 0x2
+/* enum: Major register units are seconds, minor units are quarter nanoseconds
+ */
+#define          MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS 0x3
+/* Minimum acceptable value for a corrected synchronization timeset. When
+ * comparing host and NIC clock times, the MC returns a set of samples that
+ * contain the host start and end time, the MC time when the host start was
+ * detected and the time the MC waited between reading the time and detecting
+ * the host end. The corrected sync window is the difference between the host
+ * end and start times minus the time that the MC waited for host end.
+ */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_OFST 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN_LEN 4
+/* Various PTP capabilities */
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST 8
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_LEN 4
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_LBN 0
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_REPORT_SYNC_STATUS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_LBN 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_RX_TSTAMP_OOB_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_LBN 2
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_64BIT_SECONDS_WIDTH 1
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_OFST 8
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN 3
+#define        MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_WIDTH 1
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_OFST 12
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED0_LEN 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_OFST 16
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED1_LEN 4
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_OFST 20
+#define       MC_CMD_PTP_OUT_GET_ATTRIBUTES_RESERVED2_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
+#define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_LEN 4
+
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define    MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_LEN 4
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_LEN 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_LEN 4
+/* Uncorrected error on PPS input in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_LEN 4
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_LEN 4
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+#define       MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_LEN 4
+
+/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
+#define    MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
+/* Results of testing */
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_OFST 0
+#define       MC_CMD_PTP_OUT_MANFTEST_PPS_TEST_RESULT_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_PTP_OUT_MANFTEST_BASIC/TEST_RESULT */
+
+/* MC_CMD_PTP_OUT_SET_SYNC_STATUS msgresponse */
+#define    MC_CMD_PTP_OUT_SET_SYNC_STATUS_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CSR_READ32
+ * Read 32bit words from the indirect memory map.
+ */
+#define MC_CMD_CSR_READ32 0xc
+#undef MC_CMD_0xc_PRIVILEGE_CTG
+
+#define MC_CMD_0xc_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_READ32_IN msgrequest */
+#define    MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
+#define       MC_CMD_CSR_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_READ32_IN_ADDR_LEN 4
+#define       MC_CMD_CSR_READ32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_READ32_IN_STEP_LEN 4
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
+#define       MC_CMD_CSR_READ32_IN_NUMWORDS_LEN 4
+
+/* MC_CMD_CSR_READ32_OUT msgresponse */
+#define    MC_CMD_CSR_READ32_OUT_LENMIN 4
+#define    MC_CMD_CSR_READ32_OUT_LENMAX 252
+#define    MC_CMD_CSR_READ32_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_CSR_READ32_OUT_BUFFER_NUM(len) (((len)-0)/4)
+/* The last dword is the status, not a value read */
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM 63
+#define       MC_CMD_CSR_READ32_OUT_BUFFER_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_CSR_WRITE32
+ * Write 32bit dwords to the indirect memory map.
+ */
+#define MC_CMD_CSR_WRITE32 0xd
+#undef MC_CMD_0xd_PRIVILEGE_CTG
+
+#define MC_CMD_0xd_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CSR_WRITE32_IN msgrequest */
+#define    MC_CMD_CSR_WRITE32_IN_LENMIN 12
+#define    MC_CMD_CSR_WRITE32_IN_LENMAX 252
+#define    MC_CMD_CSR_WRITE32_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_CSR_WRITE32_IN_BUFFER_NUM(len) (((len)-8)/4)
+/* Address */
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_CSR_WRITE32_IN_ADDR_LEN 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
+#define       MC_CMD_CSR_WRITE32_IN_STEP_LEN 4
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_LEN 4
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_MINNUM 1
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM 61
+#define       MC_CMD_CSR_WRITE32_IN_BUFFER_MAXNUM_MCDI2 253
+
+/* MC_CMD_CSR_WRITE32_OUT msgresponse */
+#define    MC_CMD_CSR_WRITE32_OUT_LEN 4
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_CSR_WRITE32_OUT_STATUS_LEN 4
+
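For context, the LENMIN/LENMAX/LEN(num)/NUM(len) definitions above describe a variable-length response: the caller sizes the buffer with LENMAX and derives the element count from the length actually returned. A minimal sketch of that pattern follows; it is illustrative only, assumes the driver's usual MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_ARRAY_DWORD, efx_mcdi_rpc) from the mcdi.h copied in this series, and uses a made-up function name.

/* Assumed includes for all the sketches in this series of examples. */
#include "net_driver.h"
#include "mcdi.h"
#include "mcdi_pcol.h"

static int example_csr_read32(struct efx_nic *efx, u32 addr,
			      u32 *words, unsigned int numwords)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_CSR_READ32_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_CSR_READ32_OUT_LENMAX);
	size_t outlen;
	unsigned int i;
	int rc;

	MCDI_SET_DWORD(inbuf, CSR_READ32_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, CSR_READ32_IN_STEP, 4);
	MCDI_SET_DWORD(inbuf, CSR_READ32_IN_NUMWORDS, numwords);

	rc = efx_mcdi_rpc(efx, MC_CMD_CSR_READ32, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	/* NUMWORDS values are followed by a trailing status dword */
	if (outlen < MC_CMD_CSR_READ32_OUT_LEN(numwords + 1))
		return -EIO;

	for (i = 0; i < numwords; i++)
		words[i] = MCDI_ARRAY_DWORD(outbuf, CSR_READ32_OUT_BUFFER, i);
	return 0;
}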
+
+/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+#undef MC_CMD_0x54_PRIVILEGE_CTG
+
+#define MC_CMD_0x54_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_HP_IN msgrequest */
+#define    MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define       MC_CMD_HP_IN_SUBCMD_OFST 0
+#define       MC_CMD_HP_IN_SUBCMD_LEN 4
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define          MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define          MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define       MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define       MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define       MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define       MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+#define       MC_CMD_HP_IN_OCSD_INTERVAL_LEN 4
+
+/* MC_CMD_HP_OUT msgresponse */
+#define    MC_CMD_HP_OUT_LEN 4
+#define       MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+#define       MC_CMD_HP_OUT_OCSD_STATUS_LEN 4
+/* enum: OCSD stopped for this card. */
+#define          MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define          MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define          MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
+
+
+/***********************************/
+/* MC_CMD_STACKINFO
+ * Get stack information.
+ */
+#define MC_CMD_STACKINFO 0xf
+#undef MC_CMD_0xf_PRIVILEGE_CTG
+
+#define MC_CMD_0xf_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_STACKINFO_IN msgrequest */
+#define    MC_CMD_STACKINFO_IN_LEN 0
+
+/* MC_CMD_STACKINFO_OUT msgresponse */
+#define    MC_CMD_STACKINFO_OUT_LENMIN 12
+#define    MC_CMD_STACKINFO_OUT_LENMAX 252
+#define    MC_CMD_STACKINFO_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_STACKINFO_OUT_THREAD_INFO_NUM(len) (((len)-0)/12)
+/* (thread ptr, stack size, free space) for each thread in system */
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM 21
+#define       MC_CMD_STACKINFO_OUT_THREAD_INFO_MAXNUM_MCDI2 85
+
+
+/***********************************/
+/* MC_CMD_MDIO_READ
+ * MDIO register read.
+ */
+#define MC_CMD_MDIO_READ 0x10
+#undef MC_CMD_0x10_PRIVILEGE_CTG
+
+#define MC_CMD_0x10_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MDIO_READ_IN msgrequest */
+#define    MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define       MC_CMD_MDIO_READ_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_READ_IN_BUS_LEN 4
+/* enum: Internal. */
+#define          MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define          MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
+#define       MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_READ_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define       MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_READ_IN_DEVAD_LEN 4
+/* enum: By default all the MCDI MDIO operations are performed in clause45
+ * mode. If you want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define          MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
+#define       MC_CMD_MDIO_READ_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_READ_IN_ADDR_LEN 4
+
+/* MC_CMD_MDIO_READ_OUT msgresponse */
+#define    MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
+#define       MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+#define       MC_CMD_MDIO_READ_OUT_VALUE_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define       MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
+#define       MC_CMD_MDIO_READ_OUT_STATUS_LEN 4
+/* enum: Good. */
+#define          MC_CMD_MDIO_STATUS_GOOD 0x8
+
+
+/***********************************/
+/* MC_CMD_MDIO_WRITE
+ * MDIO register write.
+ */
+#define MC_CMD_MDIO_WRITE 0x11
+#undef MC_CMD_0x11_PRIVILEGE_CTG
+
+#define MC_CMD_0x11_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_MDIO_WRITE_IN msgrequest */
+#define    MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
+#define       MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_IN_BUS_LEN 4
+/* enum: Internal. */
+/*               MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
+/*               MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+#define       MC_CMD_MDIO_WRITE_IN_PRTAD_LEN 4
+/* Device Address or clause 22. */
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+#define       MC_CMD_MDIO_WRITE_IN_DEVAD_LEN 4
+/* enum: By default all the MCDI MDIO operations perform clause45 mode. If you
+ * want to use clause22 then set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+/*               MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+#define       MC_CMD_MDIO_WRITE_IN_ADDR_LEN 4
+/* Value */
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
+#define       MC_CMD_MDIO_WRITE_IN_VALUE_LEN 4
+
+/* MC_CMD_MDIO_WRITE_OUT msgresponse */
+#define    MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+#define       MC_CMD_MDIO_WRITE_OUT_STATUS_LEN 4
+/* enum: Good. */
+/*               MC_CMD_MDIO_STATUS_GOOD 0x8 */
+
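The MDIO_READ request/response pair maps directly onto a single MDIO read transaction. A sketch under the same assumptions as the earlier example (hypothetical wrapper name, MCDI helpers from mcdi.h); the efx->mdio_bus field is assumed from the copied net_driver.h.

static int example_mdio_read(struct efx_nic *efx, int prtad, int devad,
			     u16 addr, u32 *value)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);

	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* A "good" transaction has only the DONE bit set in STATUS */
	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	*value = MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
	return 0;
}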
+
+/***********************************/
+/* MC_CMD_DBI_WRITE
+ * Write DBI register(s).
+ */
+#define MC_CMD_DBI_WRITE 0x12
+#undef MC_CMD_0x12_PRIVILEGE_CTG
+
+#define MC_CMD_0x12_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_WRITE_IN msgrequest */
+#define    MC_CMD_DBI_WRITE_IN_LENMIN 12
+#define    MC_CMD_DBI_WRITE_IN_LENMAX 252
+#define    MC_CMD_DBI_WRITE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+#define    MC_CMD_DBI_WRITE_IN_DBIWROP_NUM(len) (((len)-0)/12)
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM 21
+#define       MC_CMD_DBI_WRITE_IN_DBIWROP_MAXNUM_MCDI2 85
+
+/* MC_CMD_DBI_WRITE_OUT msgresponse */
+#define    MC_CMD_DBI_WRITE_OUT_LEN 0
+
+/* MC_CMD_DBIWROP_TYPEDEF structuredef */
+#define    MC_CMD_DBIWROP_TYPEDEF_LEN 12
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LEN 4
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
+#define       MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LEN 4
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_OFST 4
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_OFST 4
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define        MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define        MC_CMD_DBIWROP_TYPEDEF_CS2_OFST 4
+#define        MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define        MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define       MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LEN 4
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
+#define       MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PORT_READ32
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ32 0x14
+
+/* MC_CMD_PORT_READ32_IN msgrequest */
+#define    MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
+#define       MC_CMD_PORT_READ32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ32_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ32_OUT msgresponse */
+#define    MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
+#define       MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ32_OUT_VALUE_LEN 4
+/* Status */
+#define       MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
+#define       MC_CMD_PORT_READ32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE32
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE32 0x15
+
+/* MC_CMD_PORT_WRITE32_IN msgrequest */
+#define    MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE32_IN_ADDR_LEN 4
+/* Value */
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE32_IN_VALUE_LEN 4
+
+/* MC_CMD_PORT_WRITE32_OUT msgresponse */
+#define    MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE32_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_READ128
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_READ128 0x16
+
+/* MC_CMD_PORT_READ128_IN msgrequest */
+#define    MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
+#define       MC_CMD_PORT_READ128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_READ128_IN_ADDR_LEN 4
+
+/* MC_CMD_PORT_READ128_OUT msgresponse */
+#define    MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
+#define       MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
+#define       MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
+#define       MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
+#define       MC_CMD_PORT_READ128_OUT_STATUS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PORT_WRITE128
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
+ */
+#define MC_CMD_PORT_WRITE128 0x17
+
+/* MC_CMD_PORT_WRITE128_IN msgrequest */
+#define    MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+#define       MC_CMD_PORT_WRITE128_IN_ADDR_LEN 4
+/* Value */
+#define       MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
+#define       MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
+
+/* MC_CMD_PORT_WRITE128_OUT msgresponse */
+#define    MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+#define       MC_CMD_PORT_WRITE128_OUT_STATUS_LEN 4
+
+/* MC_CMD_CAPABILITIES structuredef */
+#define    MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define       MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define       MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define       MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define       MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define       MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define       MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define       MC_CMD_CAPABILITIES_PTP_LBN 3
+#define       MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define       MC_CMD_CAPABILITIES_AOE_LBN 4
+#define       MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define       MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define       MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* FC mode active. */
+#define       MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define       MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define       MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define       MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
+
+/***********************************/
+/* MC_CMD_GET_BOARD_CFG
+ * Returns the MC firmware configuration structure.
+ */
+#define MC_CMD_GET_BOARD_CFG 0x18
+#undef MC_CMD_0x18_PRIVILEGE_CTG
+
+#define MC_CMD_0x18_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_BOARD_CFG_IN msgrequest */
+#define    MC_CMD_GET_BOARD_CFG_IN_LEN 0
+
+/* MC_CMD_GET_BOARD_CFG_OUT msgresponse */
+#define    MC_CMD_GET_BOARD_CFG_OUT_LENMIN 96
+#define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX 136
+#define    MC_CMD_GET_BOARD_CFG_OUT_LENMAX_MCDI2 136
+#define    MC_CMD_GET_BOARD_CFG_OUT_LEN(num) (72+2*(num))
+#define    MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(len) (((len)-72)/2)
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_LEN 4
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
+#define       MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* Capabilities for Siena Port0 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_LEN 4
+/* Capabilities for Siena Port1 (see struct MC_CMD_CAPABILITIES). Unused on
+ * EF10 and later (use MC_CMD_GET_CAPABILITIES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
+#define       MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_LEN 4
+/* Base MAC address for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
+/* Base MAC address for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_LEN 6
+/* Size of MAC address pool for Siena Port0. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_OFST 56
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT0_LEN 4
+/* Size of MAC address pool for Siena Port1. Unused on EF10 and later (use
+ * MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port0. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_LEN 4
+/* Increment between addresses in MAC address pool for Siena Port1. Unused on
+ * EF10 and later (use MC_CMD_GET_MAC_ADDRESSES).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+#define       MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_LEN 4
+/* Siena only. This field contains a 16-bit value for each of the types of
+ * NVRAM area. The values are defined in the firmware/mc/platform/.c file for a
+ * specific board type, but otherwise have no meaning to the MC; they are used
+ * by the driver to manage selection of appropriate firmware updates. Unused on
+ * EF10 and later (use MC_CMD_NVRAM_METADATA).
+ */
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM 32
+#define       MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM_MCDI2 32
+
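GET_BOARD_CFG mixes fixed-offset fields with a trailing variable-length FW_SUBTYPE_LIST, so the response must be length-checked before the array is sized from OUTLEN. A sketch with the same assumptions as the earlier examples (hypothetical function name, MCDI helpers from mcdi.h):

static int example_log_board_cfg(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LENMIN)
		return -EIO;

	/* Fixed-offset fields are read by name... */
	netif_dbg(efx, probe, efx->net_dev, "board type %#x\n",
		  MCDI_DWORD(outbuf, GET_BOARD_CFG_OUT_BOARD_TYPE));
	/* ...and the trailing variable-length array is sized from OUTLEN */
	netif_dbg(efx, probe, efx->net_dev, "%zu firmware subtypes\n",
		  (size_t)MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_NUM(outlen));
	return 0;
}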
+
+/***********************************/
+/* MC_CMD_DBI_READX
+ * Read DBI register(s) -- extended functionality
+ */
+#define MC_CMD_DBI_READX 0x19
+#undef MC_CMD_0x19_PRIVILEGE_CTG
+
+#define MC_CMD_0x19_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DBI_READX_IN msgrequest */
+#define    MC_CMD_DBI_READX_IN_LENMIN 8
+#define    MC_CMD_DBI_READX_IN_LENMAX 248
+#define    MC_CMD_DBI_READX_IN_LENMAX_MCDI2 1016
+#define    MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+#define    MC_CMD_DBI_READX_IN_DBIRDOP_NUM(len) (((len)-0)/8)
+/* Each read op consists of an address (offset 0) and VF/CS2 parameters (offset
+ * 32). See MC_CMD_DBIRDOP_TYPEDEF.
+ */
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_HI_OFST 4
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_MINNUM 1
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM 31
+#define       MC_CMD_DBI_READX_IN_DBIRDOP_MAXNUM_MCDI2 127
+
+/* MC_CMD_DBI_READX_OUT msgresponse */
+#define    MC_CMD_DBI_READX_OUT_LENMIN 4
+#define    MC_CMD_DBI_READX_OUT_LENMAX 252
+#define    MC_CMD_DBI_READX_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_DBI_READX_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* Value */
+#define       MC_CMD_DBI_READX_OUT_VALUE_OFST 0
+#define       MC_CMD_DBI_READX_OUT_VALUE_LEN 4
+#define       MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
+#define       MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+#define       MC_CMD_DBI_READX_OUT_VALUE_MAXNUM_MCDI2 255
+
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define    MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LEN 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define       MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LEN 4
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_OFST 4
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_OFST 4
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define        MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define        MC_CMD_DBIRDOP_TYPEDEF_CS2_OFST 4
+#define        MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define        MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define       MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_SET_RAND_SEED
+ * Set the 16byte seed for the MC pseudo-random generator.
+ */
+#define MC_CMD_SET_RAND_SEED 0x1a
+#undef MC_CMD_0x1a_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_RAND_SEED_IN msgrequest */
+#define    MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
+#define       MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
+#define       MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
+
+/* MC_CMD_SET_RAND_SEED_OUT msgresponse */
+#define    MC_CMD_SET_RAND_SEED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LTSSM_HIST
+ * Retrieve the history of the LTSSM, if the build supports it.
+ */
+#define MC_CMD_LTSSM_HIST 0x1b
+
+/* MC_CMD_LTSSM_HIST_IN msgrequest */
+#define    MC_CMD_LTSSM_HIST_IN_LEN 0
+
+/* MC_CMD_LTSSM_HIST_OUT msgresponse */
+#define    MC_CMD_LTSSM_HIST_OUT_LENMIN 0
+#define    MC_CMD_LTSSM_HIST_OUT_LENMAX 252
+#define    MC_CMD_LTSSM_HIST_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_LTSSM_HIST_OUT_DATA_NUM(len) (((len)-0)/4)
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM 63
+#define       MC_CMD_LTSSM_HIST_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_DRV_ATTACH
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
+ */
+#define MC_CMD_DRV_ATTACH 0x1c
+#undef MC_CMD_0x1c_PRIVILEGE_CTG
+
+#define MC_CMD_0x1c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRV_ATTACH_IN msgrequest */
+#define    MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state to set if UPDATE=1 */
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_NEW_STATE_LEN 4
+#define        MC_CMD_DRV_ATTACH_OFST 0
+#define        MC_CMD_DRV_ATTACH_LBN 0
+#define        MC_CMD_DRV_ATTACH_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_ATTACH_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_ATTACH_LBN 0
+#define        MC_CMD_DRV_ATTACH_IN_ATTACH_WIDTH 1
+#define        MC_CMD_DRV_PREBOOT_OFST 0
+#define        MC_CMD_DRV_PREBOOT_LBN 1
+#define        MC_CMD_DRV_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_PREBOOT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_PREBOOT_LBN 1
+#define        MC_CMD_DRV_ATTACH_IN_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_LBN 2
+#define        MC_CMD_DRV_ATTACH_IN_SUBVARIANT_AWARE_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_LBN 3
+#define        MC_CMD_DRV_ATTACH_IN_WANT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_LBN 4
+#define        MC_CMD_DRV_ATTACH_IN_WANT_V2_LINKCHANGES_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+#define          MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define          MC_CMD_FW_LOW_LATENCY 0x1
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+#define          MC_CMD_FW_PACKED_STREAM 0x2
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+#define          MC_CMD_FW_HIGH_TX_RATE 0x3
+/* enum: Reserved value */
+#define          MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+#define          MC_CMD_FW_RULES_ENGINE 0x5
+/* enum: Prefer to use firmware with additional DPDK support */
+#define          MC_CMD_FW_DPDK 0x6
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+#define          MC_CMD_FW_L3XUDP 0x7
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+#define          MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe
+/* enum: Only this option is allowed for non-admin functions */
+#define          MC_CMD_FW_DONT_CARE 0xffffffff
+
+/* MC_CMD_DRV_ATTACH_IN_V2 msgrequest: Updated DRV_ATTACH to include driver
+ * version
+ */
+#define    MC_CMD_DRV_ATTACH_IN_V2_LEN 32
+/* new state to set if UPDATE=1 */
+#define       MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_IN_V2_NEW_STATE_LEN 4
+/*             MC_CMD_DRV_ATTACH_OFST 0 */
+/*             MC_CMD_DRV_ATTACH_LBN 0 */
+/*             MC_CMD_DRV_ATTACH_WIDTH 1 */
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_LBN 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_ATTACH_WIDTH 1
+/*             MC_CMD_DRV_PREBOOT_OFST 0 */
+/*             MC_CMD_DRV_PREBOOT_LBN 1 */
+/*             MC_CMD_DRV_PREBOOT_WIDTH 1 */
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_LBN 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_PREBOOT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_LBN 2
+#define        MC_CMD_DRV_ATTACH_IN_V2_SUBVARIANT_AWARE_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_LBN 3
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_LBN 4
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_V2_LINKCHANGES_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_RX_VI_SPREADING_INHIBIT_WIDTH 1
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_OFST 0
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_LBN 5
+#define        MC_CMD_DRV_ATTACH_IN_V2_WANT_TX_ONLY_SPREADING_WIDTH 1
+/* 1 to set new state, or 0 to just report the existing state */
+#define       MC_CMD_DRV_ATTACH_IN_V2_UPDATE_OFST 4
+#define       MC_CMD_DRV_ATTACH_IN_V2_UPDATE_LEN 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define       MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_OFST 8
+#define       MC_CMD_DRV_ATTACH_IN_V2_FIRMWARE_ID_LEN 4
+/* enum: Prefer to use full featured firmware */
+/*               MC_CMD_FW_FULL_FEATURED 0x0 */
+/* enum: Prefer to use firmware with fewer features but lower latency */
+/*               MC_CMD_FW_LOW_LATENCY 0x1 */
+/* enum: Prefer to use firmware for SolarCapture packed stream mode */
+/*               MC_CMD_FW_PACKED_STREAM 0x2 */
+/* enum: Prefer to use firmware with fewer features and simpler TX event
+ * batching but higher TX packet rate
+ */
+/*               MC_CMD_FW_HIGH_TX_RATE 0x3 */
+/* enum: Reserved value */
+/*               MC_CMD_FW_PACKED_STREAM_HASH_MODE_1 0x4 */
+/* enum: Prefer to use firmware with additional "rules engine" filtering
+ * support
+ */
+/*               MC_CMD_FW_RULES_ENGINE 0x5 */
+/* enum: Prefer to use firmware with additional DPDK support */
+/*               MC_CMD_FW_DPDK 0x6 */
+/* enum: Prefer to use "l3xudp" custom datapath firmware (see SF-119495-PD and
+ * bug69716)
+ */
+/*               MC_CMD_FW_L3XUDP 0x7 */
+/* enum: Requests that the MC keep whatever datapath firmware is currently
+ * running. It's used for test purposes, where we want to be able to shmboot
+ * special test firmware variants. This option is only recognised in eftest
+ * (i.e. non-production) builds.
+ */
+/*               MC_CMD_FW_KEEP_CURRENT_EFTEST_ONLY 0xfffffffe */
+/* enum: Only this option is allowed for non-admin functions */
+/*               MC_CMD_FW_DONT_CARE 0xffffffff */
+/* Version of the driver to be reported by management protocols (e.g. NC-SI)
+ * handled by the NIC. This is a zero-terminated ASCII string.
+ */
+#define       MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_OFST 12
+#define       MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN 20
+
+/* MC_CMD_DRV_ATTACH_OUT msgresponse */
+#define    MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_OUT_OLD_STATE_LEN 4
+
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define    MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state, see the bitmask at NEW_STATE */
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_LEN 4
+/* Flags associated with this function */
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+#define       MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_LEN 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
+ */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
+/* enum: If set, indicates that VI spreading is currently enabled. Will always
+ * indicate the current state, regardless of the value in the WANT_VI_SPREADING
+ * input.
+ */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_VI_SPREADING_ENABLED 0x4
+/* enum: Used during development only. Should no longer be used. */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_RX_VI_SPREADING_INHIBITED 0x5
+/* enum: If set, indicates that TX only spreading is enabled. Even-numbered
+ * TXQs will use one engine, and odd-numbered TXQs will use the other. This
+ * also has the effect that only even-numbered RXQs will receive traffic.
+ */
+#define          MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TX_ONLY_VI_SPREADING_ENABLED 0x5
+
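NEW_STATE is a bitmask built from the _LBN definitions above, UPDATE selects set versus report-only, and the extended response may omit FUNC_FLAGS on older firmware, so OUTLEN decides which form was returned. A sketch of that flow, again with a hypothetical function name and the usual MCDI helpers assumed:

static int example_drv_attach(struct efx_nic *efx, bool attach)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	size_t outlen;
	int rc;

	/* NEW_STATE is a bitmask; only the ATTACH bit is driven here */
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
		       attach ? BIT(MC_CMD_DRV_ATTACH_IN_ATTACH_LBN) : 0);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_DONT_CARE);

	rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN)
		return -EIO;

	/* Older firmware returns the short response without FUNC_FLAGS */
	if (outlen >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN &&
	    (MCDI_DWORD(outbuf, DRV_ATTACH_EXT_OUT_FUNC_FLAGS) &
	     BIT(MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL)))
		netif_dbg(efx, probe, efx->net_dev,
			  "this function has link control\n");
	return 0;
}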
+
+/***********************************/
+/* MC_CMD_SHMUART
+ * Route UART output to a circular buffer in shared memory instead.
+ */
+#define MC_CMD_SHMUART 0x1f
+
+/* MC_CMD_SHMUART_IN msgrequest */
+#define    MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
+#define       MC_CMD_SHMUART_IN_FLAG_OFST 0
+#define       MC_CMD_SHMUART_IN_FLAG_LEN 4
+
+/* MC_CMD_SHMUART_OUT msgresponse */
+#define    MC_CMD_SHMUART_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+#undef MC_CMD_0x20_PRIVILEGE_CTG
+
+#define MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define    MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define    MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ENTITY_RESET
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
+ */
+#define MC_CMD_ENTITY_RESET 0x20
+/*      MC_CMD_0x20_PRIVILEGE_CTG SRIOV_CTG_GENERAL */
+
+/* MC_CMD_ENTITY_RESET_IN msgrequest */
+#define    MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
+#define       MC_CMD_ENTITY_RESET_IN_FLAG_LEN 4
+#define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_OFST 0
+#define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
+#define        MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
+
+/* MC_CMD_ENTITY_RESET_OUT msgresponse */
+#define    MC_CMD_ENTITY_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_CREDITS
+ * Read instantaneous and minimum flow control thresholds.
+ */
+#define MC_CMD_PCIE_CREDITS 0x21
+
+/* MC_CMD_PCIE_CREDITS_IN msgrequest */
+#define    MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+#define       MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_LEN 4
+/* wipe statistics */
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
+#define       MC_CMD_PCIE_CREDITS_IN_WIPE_LEN 4
+
+/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
+#define    MC_CMD_PCIE_CREDITS_OUT_LEN 16
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_OFST 0
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_HDR_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_OFST 2
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_P_DATA_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_OFST 4
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_HDR_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_OFST 6
+#define       MC_CMD_PCIE_CREDITS_OUT_CURRENT_NP_DATA_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_OFST 8
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_HDR_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_OFST 10
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_P_DATA_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_OFST 12
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_HDR_LEN 2
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_OFST 14
+#define       MC_CMD_PCIE_CREDITS_OUT_MINIMUM_NP_DATA_LEN 2
+
+
+/***********************************/
+/* MC_CMD_RXD_MONITOR
+ * Get histogram of RX queue fill level.
+ */
+#define MC_CMD_RXD_MONITOR 0x22
+
+/* MC_CMD_RXD_MONITOR_IN msgrequest */
+#define    MC_CMD_RXD_MONITOR_IN_LEN 12
+#define       MC_CMD_RXD_MONITOR_IN_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_IN_QID_LEN 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_OFST 4
+#define       MC_CMD_RXD_MONITOR_IN_POLL_PERIOD_LEN 4
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_OFST 8
+#define       MC_CMD_RXD_MONITOR_IN_WIPE_LEN 4
+
+/* MC_CMD_RXD_MONITOR_OUT msgresponse */
+#define    MC_CMD_RXD_MONITOR_OUT_LEN 80
+#define       MC_CMD_RXD_MONITOR_OUT_QID_OFST 0
+#define       MC_CMD_RXD_MONITOR_OUT_QID_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_OFST 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_FILL_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_OFST 8
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_FILL_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_OFST 12
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_1_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_OFST 16
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_2_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_OFST 20
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_4_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_OFST 24
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_8_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_OFST 28
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_16_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_OFST 32
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_32_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_OFST 36
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_64_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_OFST 40
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_128_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_OFST 44
+#define       MC_CMD_RXD_MONITOR_OUT_RING_LT_256_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_OFST 48
+#define       MC_CMD_RXD_MONITOR_OUT_RING_GE_256_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_OFST 52
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_1_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_OFST 56
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_2_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_OFST 60
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_4_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_OFST 64
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_8_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_OFST 68
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_16_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_OFST 72
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_LT_32_LEN 4
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_OFST 76
+#define       MC_CMD_RXD_MONITOR_OUT_CACHE_GE_32_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PUTS
+ * Copy the given ASCII string out onto UART and/or out of the network port.
+ */
+#define MC_CMD_PUTS 0x23
+#undef MC_CMD_0x23_PRIVILEGE_CTG
+
+#define MC_CMD_0x23_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_PUTS_IN msgrequest */
+#define    MC_CMD_PUTS_IN_LENMIN 13
+#define    MC_CMD_PUTS_IN_LENMAX 252
+#define    MC_CMD_PUTS_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_PUTS_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_PUTS_IN_STRING_NUM(len) (((len)-12)/1)
+#define       MC_CMD_PUTS_IN_DEST_OFST 0
+#define       MC_CMD_PUTS_IN_DEST_LEN 4
+#define        MC_CMD_PUTS_IN_UART_OFST 0
+#define        MC_CMD_PUTS_IN_UART_LBN 0
+#define        MC_CMD_PUTS_IN_UART_WIDTH 1
+#define        MC_CMD_PUTS_IN_PORT_OFST 0
+#define        MC_CMD_PUTS_IN_PORT_LBN 1
+#define        MC_CMD_PUTS_IN_PORT_WIDTH 1
+#define       MC_CMD_PUTS_IN_DHOST_OFST 4
+#define       MC_CMD_PUTS_IN_DHOST_LEN 6
+#define       MC_CMD_PUTS_IN_STRING_OFST 12
+#define       MC_CMD_PUTS_IN_STRING_LEN 1
+#define       MC_CMD_PUTS_IN_STRING_MINNUM 1
+#define       MC_CMD_PUTS_IN_STRING_MAXNUM 240
+#define       MC_CMD_PUTS_IN_STRING_MAXNUM_MCDI2 1008
+
+/* MC_CMD_PUTS_OUT msgresponse */
+#define    MC_CMD_PUTS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_CFG
+ * Report PHY configuration. This is guaranteed to succeed even if the PHY is
+ * in a 'zombie' state. Locks required: None
+ */
+#define MC_CMD_GET_PHY_CFG 0x24
+#undef MC_CMD_0x24_PRIVILEGE_CTG
+
+#define MC_CMD_0x24_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_CFG_IN msgrequest */
+#define    MC_CMD_GET_PHY_CFG_IN_LEN 0
+
+/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
+#define    MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PHY_CFG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
+#define        MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN 2
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN 3
+#define        MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN 4
+#define        MC_CMD_GET_PHY_CFG_OUT_POWEROFF_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN 5
+#define        MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_OFST 0
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
+#define        MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+#define       MC_CMD_GET_PHY_CFG_OUT_TYPE_LEN 4
+/* Bitmask of supported capabilities */
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
+#define       MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_LEN 4
+#define        MC_CMD_PHY_CAP_10HDX_OFST 8
+#define        MC_CMD_PHY_CAP_10HDX_LBN 1
+#define        MC_CMD_PHY_CAP_10HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_10FDX_OFST 8
+#define        MC_CMD_PHY_CAP_10FDX_LBN 2
+#define        MC_CMD_PHY_CAP_10FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_100HDX_OFST 8
+#define        MC_CMD_PHY_CAP_100HDX_LBN 3
+#define        MC_CMD_PHY_CAP_100HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_100FDX_OFST 8
+#define        MC_CMD_PHY_CAP_100FDX_LBN 4
+#define        MC_CMD_PHY_CAP_100FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_1000HDX_OFST 8
+#define        MC_CMD_PHY_CAP_1000HDX_LBN 5
+#define        MC_CMD_PHY_CAP_1000HDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_1000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_1000FDX_LBN 6
+#define        MC_CMD_PHY_CAP_1000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_10000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_10000FDX_LBN 7
+#define        MC_CMD_PHY_CAP_10000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_PAUSE_OFST 8
+#define        MC_CMD_PHY_CAP_PAUSE_LBN 8
+#define        MC_CMD_PHY_CAP_PAUSE_WIDTH 1
+#define        MC_CMD_PHY_CAP_ASYM_OFST 8
+#define        MC_CMD_PHY_CAP_ASYM_LBN 9
+#define        MC_CMD_PHY_CAP_ASYM_WIDTH 1
+#define        MC_CMD_PHY_CAP_AN_OFST 8
+#define        MC_CMD_PHY_CAP_AN_LBN 10
+#define        MC_CMD_PHY_CAP_AN_WIDTH 1
+#define        MC_CMD_PHY_CAP_40000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define        MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_DDM_OFST 8
+#define        MC_CMD_PHY_CAP_DDM_LBN 12
+#define        MC_CMD_PHY_CAP_DDM_WIDTH 1
+#define        MC_CMD_PHY_CAP_100000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_100000FDX_LBN 13
+#define        MC_CMD_PHY_CAP_100000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_25000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_25000FDX_LBN 14
+#define        MC_CMD_PHY_CAP_25000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_50000FDX_OFST 8
+#define        MC_CMD_PHY_CAP_50000FDX_LBN 15
+#define        MC_CMD_PHY_CAP_50000FDX_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_OFST 8
+#define        MC_CMD_PHY_CAP_BASER_FEC_LBN 16
+#define        MC_CMD_PHY_CAP_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_OFST 8
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN 17
+#define        MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_OFST 8
+#define        MC_CMD_PHY_CAP_RS_FEC_LBN 18
+#define        MC_CMD_PHY_CAP_RS_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_OFST 8
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN 19
+#define        MC_CMD_PHY_CAP_RS_FEC_REQUESTED_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_OFST 8
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_LBN 20
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_WIDTH 1
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_OFST 8
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN 21
+#define        MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_WIDTH 1
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+#define       MC_CMD_GET_PHY_CFG_OUT_CHANNEL_LEN 4
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+#define       MC_CMD_GET_PHY_CFG_OUT_PRT_LEN 4
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+#define       MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_LEN 4
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
+#define       MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
+#define       MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_LEN 4
+/* enum: Xaui. */
+#define          MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define          MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define          MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define          MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define          MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define          MC_CMD_MEDIA_BASE_T 0x6
+/* enum: QSFP+. */
+#define          MC_CMD_MEDIA_QSFP_PLUS 0x7
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
+#define       MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_LEN 4
+/* enum: Native clause 22 */
+#define          MC_CMD_MMD_CLAUSE22 0x0
+#define          MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_PHYXS 0x4 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
+#define          MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define          MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
+#define          MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
+#define          MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
+#define       MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
+#define       MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
+
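The SUPPORTED_CAP word is tested bit-by-bit against the MC_CMD_PHY_CAP_*_LBN values. A short sketch, assuming the same MCDI helpers plus the kernel BIT() macro; the function name is made up.

static bool example_phy_supports_autoneg(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc || outlen < MC_CMD_GET_PHY_CFG_OUT_LEN)
		return false;

	/* Capability bits are tested against their _LBN definitions */
	return MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP) &
	       BIT(MC_CMD_PHY_CAP_AN_LBN);
}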
+
+/***********************************/
+/* MC_CMD_START_BIST
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
+ */
+#define MC_CMD_START_BIST 0x25
+#undef MC_CMD_0x25_PRIVILEGE_CTG
+
+#define MC_CMD_0x25_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_START_BIST_IN msgrequest */
+#define    MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
+#define       MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define       MC_CMD_START_BIST_IN_TYPE_LEN 4
+/* enum: Run the PHY's short cable BIST. */
+#define          MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define          MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define          MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define          MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define          MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define          MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define          MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define          MC_CMD_REG_BIST 0x8
+
+/* MC_CMD_START_BIST_OUT msgresponse */
+#define    MC_CMD_START_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_BIST
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY-specific BIST output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
+ */
+#define MC_CMD_POLL_BIST 0x26
+#undef MC_CMD_0x26_PRIVILEGE_CTG
+
+#define MC_CMD_0x26_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_POLL_BIST_IN msgrequest */
+#define    MC_CMD_POLL_BIST_IN_LEN 0
+
+/* MC_CMD_POLL_BIST_OUT msgresponse */
+#define    MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
+#define       MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
+#define       MC_CMD_POLL_BIST_OUT_RESULT_LEN 4
+/* enum: Running. */
+#define          MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define          MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define          MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define          MC_CMD_POLL_BIST_TIMEOUT 0x4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_PRIVATE_LEN 4
+
+/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
+#define    MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_LEN 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_LEN 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_LEN 4
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_LEN 4
+/* Status of each channel A */
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_LEN 4
+/* enum: Ok. */
+#define          MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define          MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define          MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define          MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define          MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_LEN 4
+/*            Enum values, see field(s): */
+/*               CABLE_STATUS_A */
+/* Status of each channel C */
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_LEN 4
+/*            Enum values, see field(s): */
+/*               CABLE_STATUS_A */
+/* Status of each channel D */
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_LEN 4
+/*            Enum values, see field(s): */
+/*               CABLE_STATUS_A */
+
+/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
+#define    MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MRSFP_TEST_LEN 4
+/* enum: Complete. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define          MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define    MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/*            MC_CMD_POLL_BIST_OUT_RESULT_LEN 4 */
+/*            Enum values, see field(s): */
+/*               MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+#define       MC_CMD_POLL_BIST_OUT_MEM_TEST_LEN 4
+/* enum: Test has completed. */
+#define          MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define          MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define          MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define          MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define          MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define          MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define          MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+#define       MC_CMD_POLL_BIST_OUT_MEM_ADDR_LEN 4
+/* Bus or address space to which the failure address corresponds */
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+#define       MC_CMD_POLL_BIST_OUT_MEM_BUS_LEN 4
+/* enum: MC MIPS bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX0 DPCPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX0 DICPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* enum: RX1 DPCPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX1 0x7
+/* enum: RX1 DICPU bus. */
+#define          MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX1 0x8
+/* Pattern written to RAM / register */
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+#define       MC_CMD_POLL_BIST_OUT_MEM_EXPECT_LEN 4
+/* Actual value read from RAM / register */
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+#define       MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_LEN 4
+/* ECC error mask */
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_LEN 4
+/* ECC parity error mask */
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_LEN 4
+/* ECC fatal error mask */
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
+#define       MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_LEN 4
+
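As the command description above says, RESULT should be honoured even when the type-specific payload (SFT9001/MRSFP/MEM) cannot be parsed. A sketch of that polling contract, assumptions as in the earlier examples:

static int example_poll_bist(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_MEM_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
		return -EIO;

	switch (MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT)) {
	case MC_CMD_POLL_BIST_RUNNING:
		return -EAGAIN;		/* caller polls again */
	case MC_CMD_POLL_BIST_PASSED:
		return 0;
	default:
		/* The type-specific payload is only parsed after checking
		 * OUTLEN and the PHY type; the pass/fail in RESULT stands
		 * regardless.
		 */
		return -EIO;
	}
}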
+
+/***********************************/
+/* MC_CMD_FLUSH_RX_QUEUES
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than by
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
+ */
+#define MC_CMD_FLUSH_RX_QUEUES 0x27
+
+/* MC_CMD_FLUSH_RX_QUEUES_IN msgrequest */
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMIN 4
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX 252
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_NUM(len) (((len)-0)/4)
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_OFST 0
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_LEN 4
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MINNUM 1
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM 63
+#define       MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM_MCDI2 255
+
+/* MC_CMD_FLUSH_RX_QUEUES_OUT msgresponse */
+#define    MC_CMD_FLUSH_RX_QUEUES_OUT_LEN 0
+
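+/* Usage sketch (illustrative only): flushing a single RX queue with this
+ * command. The helper names (MCDI_DECLARE_BUF, MCDI_SET_ARRAY_DWORD,
+ * efx_mcdi_rpc) are assumed to come from the driver's mcdi.h; error handling
+ * is omitted and the caller must still wait for the flush done/fail events.
+ *
+ *	static int flush_one_rxq(struct efx_nic *efx, unsigned int qid)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LEN(1));
+ *
+ *		MCDI_SET_ARRAY_DWORD(inbuf, FLUSH_RX_QUEUES_IN_QID_OFST, 0, qid);
+ *		return efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES,
+ *				    inbuf, sizeof(inbuf), NULL, 0, NULL);
+ *	}
+ */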
+
+/***********************************/
+/* MC_CMD_GET_LOOPBACK_MODES
+ * Returns a bitmask of loopback modes available at each speed.
+ */
+#define MC_CMD_GET_LOOPBACK_MODES 0x28
+#undef MC_CMD_0x28_PRIVILEGE_CTG
+
+#define MC_CMD_0x28_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LOOPBACK_MODES_IN msgrequest */
+#define    MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
+/* enum: None. */
+#define          MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define          MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define          MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define          MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define          MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define          MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define          MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define          MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define          MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define          MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define          MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define          MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define          MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define          MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define          MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define          MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define          MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define          MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define          MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define          MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define          MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define          MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside Near. */
+#define          MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define          MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define          MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define          MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define          MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define          MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define          MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define          MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define          MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define          MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define          MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define          MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define          MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* enum: Near side of AOE Siena side port */
+#define          MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23
+/* enum: Medford Wireside datapath loopback */
+#define          MC_CMD_LOOPBACK_DATA_WS 0x24
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+#define          MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+
+/* MC_CMD_GET_LOOPBACK_MODES_OUT_V2 msgresponse: Supported loopback modes for
+ * newer NICs with 25G/50G/100G support
+ */
+#define    MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN 64
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_LO_OFST 0
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100M_HI_OFST 4
+/* enum: None. */
+/*               MC_CMD_LOOPBACK_NONE 0x0 */
+/* enum: Data. */
+/*               MC_CMD_LOOPBACK_DATA 0x1 */
+/* enum: GMAC. */
+/*               MC_CMD_LOOPBACK_GMAC 0x2 */
+/* enum: XGMII. */
+/*               MC_CMD_LOOPBACK_XGMII 0x3 */
+/* enum: XGXS. */
+/*               MC_CMD_LOOPBACK_XGXS 0x4 */
+/* enum: XAUI. */
+/*               MC_CMD_LOOPBACK_XAUI 0x5 */
+/* enum: GMII. */
+/*               MC_CMD_LOOPBACK_GMII 0x6 */
+/* enum: SGMII. */
+/*               MC_CMD_LOOPBACK_SGMII 0x7 */
+/* enum: XGBR. */
+/*               MC_CMD_LOOPBACK_XGBR 0x8 */
+/* enum: XFI. */
+/*               MC_CMD_LOOPBACK_XFI 0x9 */
+/* enum: XAUI Far. */
+/*               MC_CMD_LOOPBACK_XAUI_FAR 0xa */
+/* enum: GMII Far. */
+/*               MC_CMD_LOOPBACK_GMII_FAR 0xb */
+/* enum: SGMII Far. */
+/*               MC_CMD_LOOPBACK_SGMII_FAR 0xc */
+/* enum: XFI Far. */
+/*               MC_CMD_LOOPBACK_XFI_FAR 0xd */
+/* enum: GPhy. */
+/*               MC_CMD_LOOPBACK_GPHY 0xe */
+/* enum: PhyXS. */
+/*               MC_CMD_LOOPBACK_PHYXS 0xf */
+/* enum: PCS. */
+/*               MC_CMD_LOOPBACK_PCS 0x10 */
+/* enum: PMA-PMD. */
+/*               MC_CMD_LOOPBACK_PMAPMD 0x11 */
+/* enum: Cross-Port. */
+/*               MC_CMD_LOOPBACK_XPORT 0x12 */
+/* enum: XGMII-Wireside. */
+/*               MC_CMD_LOOPBACK_XGMII_WS 0x13 */
+/* enum: XAUI Wireside. */
+/*               MC_CMD_LOOPBACK_XAUI_WS 0x14 */
+/* enum: XAUI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 */
+/* enum: XAUI Wireside Near. */
+/*               MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 */
+/* enum: GMII Wireside. */
+/*               MC_CMD_LOOPBACK_GMII_WS 0x17 */
+/* enum: XFI Wireside. */
+/*               MC_CMD_LOOPBACK_XFI_WS 0x18 */
+/* enum: XFI Wireside Far. */
+/*               MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 */
+/* enum: PhyXS Wireside. */
+/*               MC_CMD_LOOPBACK_PHYXS_WS 0x1a */
+/* enum: PMA lanes MAC-Serdes. */
+/*               MC_CMD_LOOPBACK_PMA_INT 0x1b */
+/* enum: KR Serdes Parallel (Encoder). */
+/*               MC_CMD_LOOPBACK_SD_NEAR 0x1c */
+/* enum: KR Serdes Serial. */
+/*               MC_CMD_LOOPBACK_SD_FAR 0x1d */
+/* enum: PMA lanes MAC-Serdes Wireside. */
+/*               MC_CMD_LOOPBACK_PMA_INT_WS 0x1e */
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+/*               MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f */
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+/*               MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20 */
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+/*               MC_CMD_LOOPBACK_SD_FEP_WS 0x21 */
+/* enum: KR Serdes Serial Wireside. */
+/*               MC_CMD_LOOPBACK_SD_FES_WS 0x22 */
+/* enum: Near side of AOE Siena side port */
+/*               MC_CMD_LOOPBACK_AOE_INT_NEAR 0x23 */
+/* enum: Medford Wireside datapath loopback */
+/*               MC_CMD_LOOPBACK_DATA_WS 0x24 */
+/* enum: Force link up without setting up any physical loopback (snapper use
+ * only)
+ */
+/*               MC_CMD_LOOPBACK_FORCE_EXT_LINK 0x25 */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_LO_OFST 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_1G_HI_OFST 12
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_LO_OFST 16
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_10G_HI_OFST 20
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_LO_OFST 24
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_SUGGESTED_HI_OFST 28
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_LO_OFST 32
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_40G_HI_OFST 36
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 25G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LO_OFST 40
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_HI_OFST 44
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 50G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LO_OFST 48
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               100M */
+/* Supported 100G loopbacks. */
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN 8
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LO_OFST 56
+#define       MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_HI_OFST 60
+/*            Enum values, see field(s): */
+/*               100M */
+
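+/* Usage sketch (illustrative only): testing whether a loopback mode is
+ * available at 10G. Each speed field above is read as a 64-bit mask with one
+ * bit per MC_CMD_LOOPBACK_* value (an assumption of this sketch). The MCDI
+ * helpers (MCDI_DECLARE_BUF, MCDI_QWORD, efx_mcdi_rpc) are assumed to come
+ * from the driver's mcdi.h.
+ *
+ *	static bool loopback_supported_at_10g(struct efx_nic *efx,
+ *					      unsigned int mode)
+ *	{
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+ *		size_t outlen;
+ *
+ *		if (efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+ *				 outbuf, sizeof(outbuf), &outlen))
+ *			return false;
+ *		return MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_10G) &
+ *		       (1ULL << mode);
+ *	}
+ */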
+/* AN_TYPE structuredef: Auto-negotiation types defined in IEEE802.3 */
+#define    AN_TYPE_LEN 4
+#define       AN_TYPE_TYPE_OFST 0
+#define       AN_TYPE_TYPE_LEN 4
+/* enum: None, AN disabled or not supported */
+#define          MC_CMD_AN_NONE 0x0
+/* enum: Clause 28 - BASE-T */
+#define          MC_CMD_AN_CLAUSE28 0x1
+/* enum: Clause 37 - BASE-X */
+#define          MC_CMD_AN_CLAUSE37 0x2
+/* enum: Clause 73 - BASE-R startup protocol for backplane and copper cable
+ * assemblies. Includes Clause 72/Clause 92 link-training.
+ */
+#define          MC_CMD_AN_CLAUSE73 0x3
+#define       AN_TYPE_TYPE_LBN 0
+#define       AN_TYPE_TYPE_WIDTH 32
+
+/* FEC_TYPE structuredef: Forward error correction types defined in IEEE802.3
+ */
+#define    FEC_TYPE_LEN 4
+#define       FEC_TYPE_TYPE_OFST 0
+#define       FEC_TYPE_TYPE_LEN 4
+/* enum: No FEC */
+#define          MC_CMD_FEC_NONE 0x0
+/* enum: Clause 74 BASE-R FEC (a.k.a Firecode) */
+#define          MC_CMD_FEC_BASER 0x1
+/* enum: Clause 91/Clause 108 Reed-Solomon FEC */
+#define          MC_CMD_FEC_RS 0x2
+#define       FEC_TYPE_TYPE_LBN 0
+#define       FEC_TYPE_TYPE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_GET_LINK
+ * Read the unified MAC/PHY link state. Locks required: None. Return code: 0,
+ * ETIME.
+ */
+#define MC_CMD_GET_LINK 0x29
+#undef MC_CMD_0x29_PRIVILEGE_CTG
+
+#define MC_CMD_0x29_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LINK_IN msgrequest */
+#define    MC_CMD_GET_LINK_IN_LEN 0
+
+/* MC_CMD_GET_LINK_OUT msgresponse */
+#define    MC_CMD_GET_LINK_OUT_LEN 28
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_GET_LINK_OUT_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define       MC_CMD_GET_LINK_OUT_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_LINK_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_LINK_UP_LBN 0
+#define        MC_CMD_GET_LINK_OUT_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN 1
+#define        MC_CMD_GET_LINK_OUT_FULL_DUPLEX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_BPX_LINK_OFST 16
+#define        MC_CMD_GET_LINK_OUT_BPX_LINK_LBN 2
+#define        MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_PHY_LINK_OFST 16
+#define        MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
+#define        MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_LBN 6
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_RX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_LBN 7
+#define        MC_CMD_GET_LINK_OUT_LINK_FAULT_TX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_OFST 16
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_LBN 8
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_VALID_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_LBN 9
+#define        MC_CMD_GET_LINK_OUT_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define       MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_FCNTL_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_MAC_FAULT_LEN 4
+#define        MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24
+#define        MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
+#define        MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
+#define        MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24
+#define        MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1
+#define        MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1
+#define        MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24
+#define        MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2
+#define        MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1
+#define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24
+#define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3
+#define        MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1
+
+/* MC_CMD_GET_LINK_OUT_V2 msgresponse: Extended link state information */
+#define    MC_CMD_GET_LINK_OUT_V2_LEN 44
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_GET_LINK_OUT_V2_CAP_OFST 0
+#define       MC_CMD_GET_LINK_OUT_V2_CAP_LEN 4
+/* Link-partner advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_GET_LINK_OUT_V2_LP_CAP_OFST 4
+#define       MC_CMD_GET_LINK_OUT_V2_LP_CAP_LEN 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
+#define       MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_OFST 8
+#define       MC_CMD_GET_LINK_OUT_V2_LINK_SPEED_LEN 4
+/* Current loopback setting. */
+#define       MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_OFST 12
+#define       MC_CMD_GET_LINK_OUT_V2_LOOPBACK_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+#define       MC_CMD_GET_LINK_OUT_V2_FLAGS_OFST 16
+#define       MC_CMD_GET_LINK_OUT_V2_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_LBN 0
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_LBN 1
+#define        MC_CMD_GET_LINK_OUT_V2_FULL_DUPLEX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_LBN 2
+#define        MC_CMD_GET_LINK_OUT_V2_BPX_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_LBN 3
+#define        MC_CMD_GET_LINK_OUT_V2_PHY_LINK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_LBN 6
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_RX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_LBN 7
+#define        MC_CMD_GET_LINK_OUT_V2_LINK_FAULT_TX_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_LBN 8
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_VALID_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_OFST 16
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_LBN 9
+#define        MC_CMD_GET_LINK_OUT_V2_MODULE_UP_WIDTH 1
+/* This returns the negotiated flow control value. */
+#define       MC_CMD_GET_LINK_OUT_V2_FCNTL_OFST 20
+#define       MC_CMD_GET_LINK_OUT_V2_FCNTL_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_MAC/MC_CMD_SET_MAC_IN/FCNTL */
+#define       MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_OFST 24
+#define       MC_CMD_GET_LINK_OUT_V2_MAC_FAULT_LEN 4
+/*             MC_CMD_MAC_FAULT_XGMII_LOCAL_OFST 24 */
+/*             MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0 */
+/*             MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_XGMII_REMOTE_OFST 24 */
+/*             MC_CMD_MAC_FAULT_XGMII_REMOTE_LBN 1 */
+/*             MC_CMD_MAC_FAULT_XGMII_REMOTE_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_SGMII_REMOTE_OFST 24 */
+/*             MC_CMD_MAC_FAULT_SGMII_REMOTE_LBN 2 */
+/*             MC_CMD_MAC_FAULT_SGMII_REMOTE_WIDTH 1 */
+/*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_OFST 24 */
+/*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_LBN 3 */
+/*             MC_CMD_MAC_FAULT_PENDING_RECONFIG_WIDTH 1 */
+/* True local device capabilities (taking into account currently used PMD/MDI,
+ * e.g. plugged-in module). In general, subset of
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP, but may include extra _FEC_REQUEST
+ * bits, if the PMD requires FEC. 0 if unknown (e.g. module unplugged). Equal
+ * to SUPPORTED_CAP for non-pluggable PMDs. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_GET_LINK_OUT_V2_LD_CAP_OFST 28
+#define       MC_CMD_GET_LINK_OUT_V2_LD_CAP_LEN 4
+/* Auto-negotiation type used on the link */
+#define       MC_CMD_GET_LINK_OUT_V2_AN_TYPE_OFST 32
+#define       MC_CMD_GET_LINK_OUT_V2_AN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               AN_TYPE/TYPE */
+/* Forward error correction used on the link */
+#define       MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_OFST 36
+#define       MC_CMD_GET_LINK_OUT_V2_FEC_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               FEC_TYPE/TYPE */
+#define       MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_OFST 40
+#define       MC_CMD_GET_LINK_OUT_V2_EXT_FLAGS_LEN 4
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_LBN 0
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_MDI_CONNECTED_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_LBN 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_READY_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_LBN 2
+#define        MC_CMD_GET_LINK_OUT_V2_PMD_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_LBN 3
+#define        MC_CMD_GET_LINK_OUT_V2_PMA_LINK_UP_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_LBN 4
+#define        MC_CMD_GET_LINK_OUT_V2_PCS_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_LBN 5
+#define        MC_CMD_GET_LINK_OUT_V2_ALIGN_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_HI_BER_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_HI_BER_LBN 6
+#define        MC_CMD_GET_LINK_OUT_V2_HI_BER_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_LBN 7
+#define        MC_CMD_GET_LINK_OUT_V2_FEC_LOCK_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_LBN 8
+#define        MC_CMD_GET_LINK_OUT_V2_AN_DONE_WIDTH 1
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_OFST 40
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_LBN 9
+#define        MC_CMD_GET_LINK_OUT_V2_PORT_SHUTDOWN_WIDTH 1
+
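+/* Usage sketch (illustrative only): reading the link state and decoding one
+ * flag bit via its _LBN define. The MCDI helpers (MCDI_DECLARE_BUF,
+ * MCDI_DWORD, efx_mcdi_rpc) are assumed to come from the driver's mcdi.h;
+ * error handling beyond the rpc return code is omitted.
+ *
+ *	static int read_link_state(struct efx_nic *efx, u32 *speed_mbps,
+ *				   bool *link_up)
+ *	{
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ *		size_t outlen;
+ *		u32 flags;
+ *		int rc;
+ *
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		*speed_mbps = MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED);
+ *		flags = MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS);
+ *		*link_up = (flags >> MC_CMD_GET_LINK_OUT_LINK_UP_LBN) & 1;
+ *		return 0;
+ *	}
+ */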
+
+/***********************************/
+/* MC_CMD_SET_LINK
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME, EAGAIN
+ */
+#define MC_CMD_SET_LINK 0x2a
+#undef MC_CMD_0x2a_PRIVILEGE_CTG
+
+#define MC_CMD_0x2a_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_LINK_IN msgrequest */
+#define    MC_CMD_SET_LINK_IN_LEN 16
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_SET_LINK_IN_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_CAP_LEN 4
+/* Flags */
+#define       MC_CMD_SET_LINK_IN_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_LINK_IN_LOWPOWER_OFST 4
+#define        MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
+#define        MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_POWEROFF_OFST 4
+#define        MC_CMD_SET_LINK_IN_POWEROFF_LBN 1
+#define        MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_TXDIS_OFST 4
+#define        MC_CMD_SET_LINK_IN_TXDIS_LBN 2
+#define        MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_OFST 4
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_LBN 3
+#define        MC_CMD_SET_LINK_IN_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_LEN 4
+
+/* MC_CMD_SET_LINK_IN_V2 msgrequest: Updated SET_LINK to include sequence
+ * number to ensure this SET_LINK command corresponds to the latest
+ * MODULECHANGE event.
+ */
+#define    MC_CMD_SET_LINK_IN_V2_LEN 17
+/* Near-side advertised capabilities. Refer to
+ * MC_CMD_GET_PHY_CFG_OUT/SUPPORTED_CAP for bit definitions.
+ */
+#define       MC_CMD_SET_LINK_IN_V2_CAP_OFST 0
+#define       MC_CMD_SET_LINK_IN_V2_CAP_LEN 4
+/* Flags */
+#define       MC_CMD_SET_LINK_IN_V2_FLAGS_OFST 4
+#define       MC_CMD_SET_LINK_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_LBN 0
+#define        MC_CMD_SET_LINK_IN_V2_LOWPOWER_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_LBN 1
+#define        MC_CMD_SET_LINK_IN_V2_POWEROFF_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_LBN 2
+#define        MC_CMD_SET_LINK_IN_V2_TXDIS_WIDTH 1
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_OFST 4
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_LBN 3
+#define        MC_CMD_SET_LINK_IN_V2_LINKDOWN_WIDTH 1
+/* Loopback mode. */
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_OFST 8
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_OFST 12
+#define       MC_CMD_SET_LINK_IN_V2_LOOPBACK_SPEED_LEN 4
+#define       MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_OFST 16
+#define       MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_LEN 1
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_OFST 16
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_LBN 0
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_NUMBER_WIDTH 7
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_OFST 16
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_LBN 7
+#define        MC_CMD_SET_LINK_IN_V2_MODULE_SEQ_IGNORE_WIDTH 1
+
+/* MC_CMD_SET_LINK_OUT msgresponse */
+#define    MC_CMD_SET_LINK_OUT_LEN 0
+
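+/* Usage sketch (illustrative only): requesting a loopback mode via
+ * MC_CMD_SET_LINK. A LOOPBACK_SPEED of 0 lets the MC pick any available
+ * speed, as noted above. The MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
+ * efx_mcdi_rpc) are assumed to come from the driver's mcdi.h; error handling
+ * is omitted.
+ *
+ *	static int set_loopback(struct efx_nic *efx, u32 caps, u32 mode)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+ *
+ *		MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, caps);
+ *		MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, 0);
+ *		MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, mode);
+ *		MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, 0);
+ *		return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+ *				    NULL, 0, NULL);
+ *	}
+ */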
+
+/***********************************/
+/* MC_CMD_SET_ID_LED
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_ID_LED 0x2b
+#undef MC_CMD_0x2b_PRIVILEGE_CTG
+
+#define MC_CMD_0x2b_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_SET_ID_LED_IN msgrequest */
+#define    MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
+#define       MC_CMD_SET_ID_LED_IN_STATE_OFST 0
+#define       MC_CMD_SET_ID_LED_IN_STATE_LEN 4
+#define          MC_CMD_LED_OFF 0x0 /* enum */
+#define          MC_CMD_LED_ON 0x1 /* enum */
+#define          MC_CMD_LED_DEFAULT 0x2 /* enum */
+
+/* MC_CMD_SET_ID_LED_OUT msgresponse */
+#define    MC_CMD_SET_ID_LED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MAC
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
+ */
+#define MC_CMD_SET_MAC 0x2c
+#undef MC_CMD_0x2c_PRIVILEGE_CTG
+
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_MAC_IN msgrequest */
+#define    MC_CMD_SET_MAC_IN_LEN 28
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define       MC_CMD_SET_MAC_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_IN_MTU_LEN 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_IN_DRAIN_LEN 4
+#define       MC_CMD_SET_MAC_IN_ADDR_OFST 8
+#define       MC_CMD_SET_MAC_IN_ADDR_LEN 8
+#define       MC_CMD_SET_MAC_IN_ADDR_LO_OFST 8
+#define       MC_CMD_SET_MAC_IN_ADDR_HI_OFST 12
+#define       MC_CMD_SET_MAC_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_IN_REJECT_LEN 4
+#define        MC_CMD_SET_MAC_IN_REJECT_UNCST_OFST 16
+#define        MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN 0
+#define        MC_CMD_SET_MAC_IN_REJECT_UNCST_WIDTH 1
+#define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_OFST 16
+#define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
+#define        MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
+#define       MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+#define          MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define          MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define          MC_CMD_FCNTL_BIDIR 0x2
+/* enum: Auto neg flow control. */
+#define          MC_CMD_FCNTL_AUTO 0x3
+/* enum: Priority flow control (eftest builds only). */
+#define          MC_CMD_FCNTL_QBB 0x4
+/* enum: Issue flow control. */
+#define          MC_CMD_FCNTL_GENERATE 0x5
+#define       MC_CMD_SET_MAC_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_OFST 24
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define    MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define       MC_CMD_SET_MAC_EXT_IN_MTU_LEN 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define       MC_CMD_SET_MAC_EXT_IN_DRAIN_LEN 4
+#define       MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define       MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define       MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define       MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define       MC_CMD_SET_MAC_EXT_IN_REJECT_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_OFST 16
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_OFST 16
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define        MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+#define       MC_CMD_SET_MAC_EXT_IN_FCNTL_LEN 4
+/* enum: Flow control is off. */
+/*               MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/*               MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
+/*               MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/*               MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/*               MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/*               MC_CMD_FCNTL_GENERATE 0x5 */
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define       MC_CMD_SET_MAC_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_OFST 24
+#define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define        MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define       MC_CMD_SET_MAC_EXT_IN_CONTROL_LEN 4
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_OFST 28
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_OFST 28
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_OFST 28
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_OFST 28
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_OFST 28
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define        MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
+/* MC_CMD_SET_MAC_OUT msgresponse */
+#define    MC_CMD_SET_MAC_OUT_LEN 0
+
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define    MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+#define       MC_CMD_SET_MAC_V2_OUT_MTU_LEN 4
+
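+/* Usage sketch (illustrative only): querying the current MTU by sending
+ * MC_CMD_SET_MAC_EXT_IN with CONTROL set to 0, so no parameter is modified,
+ * and reading MC_CMD_SET_MAC_V2_OUT/MTU from the response. Assumes firmware
+ * that returns the V2 response and the MCDI helpers from the driver's
+ * mcdi.h; error handling is otherwise omitted.
+ *
+ *	static int query_mac_mtu(struct efx_nic *efx, u32 *mtu)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_MAC_V2_OUT_LEN);
+ *		size_t outlen;
+ *		int rc;
+ *
+ *		MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_CONTROL, 0);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		if (outlen < MC_CMD_SET_MAC_V2_OUT_LEN)
+ *			return -EIO;
+ *		*mtu = MCDI_DWORD(outbuf, SET_MAC_V2_OUT_MTU);
+ *		return 0;
+ *	}
+ */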
+
+/***********************************/
+/* MC_CMD_PHY_STATS
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration below). Each value is
+ * represented by a 32bit number. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None.
+ * Returns: 0, ETIME
+ */
+#define MC_CMD_PHY_STATS 0x2d
+#undef MC_CMD_0x2d_PRIVILEGE_CTG
+
+#define MC_CMD_0x2d_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_PHY_STATS_IN msgrequest */
+#define    MC_CMD_PHY_STATS_IN_LEN 8
+/* DMA address of the host buffer to receive the statistics, or 0 to return
+ * them in the message response.
+ */
+#define       MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
+#define       MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_PHY_STATS_OUT_DMA msgresponse */
+#define    MC_CMD_PHY_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_PHY_STATS_OUT_NO_DMA msgresponse */
+#define    MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (((MC_CMD_PHY_NSTATS*32))>>3)
+#define       MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
+#define       MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
+/* enum: OUI. */
+#define          MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define          MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define          MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define          MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define          MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define          MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define          MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define          MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define          MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define          MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define          MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define          MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define          MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define          MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define          MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define          MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define          MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define          MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define          MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define          MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define          MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define          MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define          MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define          MC_CMD_PHY_NSTATS 0x17
+
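+/* Usage sketch (illustrative only): reading one PHY statistic from the
+ * non-DMA response (DMA_ADDR == 0). Each statistic is a 32-bit value at
+ * offset index * 4 in the STATISTICS array. The MCDI helpers
+ * (MCDI_DECLARE_BUF, MCDI_SET_QWORD, MCDI_PTR, efx_mcdi_rpc) are assumed to
+ * come from the driver's mcdi.h; error handling beyond the rpc return code
+ * is omitted.
+ *
+ *	static int read_phy_stat(struct efx_nic *efx, unsigned int index,
+ *				 u32 *value)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_PHY_STATS_IN_LEN);
+ *		MCDI_DECLARE_BUF(outbuf, MC_CMD_PHY_STATS_OUT_NO_DMA_LEN);
+ *		const __le32 *stats;
+ *		size_t outlen;
+ *		int rc;
+ *
+ *		MCDI_SET_QWORD(inbuf, PHY_STATS_IN_DMA_ADDR, 0);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_PHY_STATS, inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			return rc;
+ *		stats = (const __le32 *)
+ *			MCDI_PTR(outbuf, PHY_STATS_OUT_NO_DMA_STATISTICS);
+ *		*value = le32_to_cpu(stats[index]);
+ *		return 0;
+ *	}
+ */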
+
+/***********************************/
+/* MC_CMD_MAC_STATS
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned)
+ * location. Locks required: None. The PERIODIC_CLEAR option is not used and
+ * now has no effect. Returns: 0, ETIME
+ */
+#define MC_CMD_MAC_STATS 0x2e
+#undef MC_CMD_0x2e_PRIVILEGE_CTG
+
+#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MAC_STATS_IN msgrequest */
+#define    MC_CMD_MAC_STATS_IN_LEN 20
+/* DMA address of the host buffer to receive the statistics, or 0 to return
+ * them in the message response.
+ */
+#define       MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
+#define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_MAC_STATS_IN_DMA_ADDR_HI_OFST 4
+#define       MC_CMD_MAC_STATS_IN_CMD_OFST 8
+#define       MC_CMD_MAC_STATS_IN_CMD_LEN 4
+#define        MC_CMD_MAC_STATS_IN_DMA_OFST 8
+#define        MC_CMD_MAC_STATS_IN_DMA_LBN 0
+#define        MC_CMD_MAC_STATS_IN_DMA_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_CLEAR_OFST 8
+#define        MC_CMD_MAC_STATS_IN_CLEAR_LBN 1
+#define        MC_CMD_MAC_STATS_IN_CLEAR_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_OFST 8
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_LBN 2
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_OFST 8
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_LBN 3
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_OFST 8
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_LBN 4
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_OFST 8
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_LBN 5
+#define        MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT_WIDTH 1
+#define        MC_CMD_MAC_STATS_IN_PERIOD_MS_OFST 8
+#define        MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
+#define        MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
+/* DMA length. Should be set to MAC_STATS_NUM_STATS * sizeof(uint64_t), as
+ * returned by MC_CMD_GET_CAPABILITIES_V4_OUT. For legacy firmware not
+ * supporting MC_CMD_GET_CAPABILITIES_V4_OUT, DMA_LEN should be set to
+ * MC_CMD_MAC_NSTATS * sizeof(uint64_t)
+ */
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+#define       MC_CMD_MAC_STATS_IN_DMA_LEN_LEN 4
+/* Port ID so that vadapter stats can be provided */
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
+#define       MC_CMD_MAC_STATS_IN_PORT_ID_LEN 4
+
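+/* Usage sketch (illustrative only): a one-shot DMA of the MAC statistics
+ * into a zeroed, page-aligned host buffer of dma_len bytes
+ * (MAC_STATS_NUM_STATS * sizeof(uint64_t), as described above). The CMD
+ * dword is composed directly from the _LBN defines; the MCDI helpers
+ * (MCDI_DECLARE_BUF, MCDI_SET_QWORD, MCDI_SET_DWORD, efx_mcdi_rpc) are
+ * assumed to come from the driver's mcdi.h and error handling is omitted.
+ *
+ *	static int dma_mac_stats_once(struct efx_nic *efx, dma_addr_t dma_addr,
+ *				      u32 dma_len, u32 port_id)
+ *	{
+ *		MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ *
+ *		MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ *		MCDI_SET_DWORD(inbuf, MAC_STATS_IN_CMD,
+ *			       1 << MC_CMD_MAC_STATS_IN_DMA_LBN);
+ *		MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ *		MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, port_id);
+ *		return efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ *				    NULL, 0, NULL);
+ *	}
+ */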
+/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS*64))>>3)
+#define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS
+#define          MC_CMD_MAC_GENERATION_START 0x0 /* enum */
+#define          MC_CMD_MAC_DMABUF_START 0x1 /* enum */
+#define          MC_CMD_MAC_TX_PKTS 0x1 /* enum */
+#define          MC_CMD_MAC_TX_PAUSE_PKTS 0x2 /* enum */
+#define          MC_CMD_MAC_TX_CONTROL_PKTS 0x3 /* enum */
+#define          MC_CMD_MAC_TX_UNICAST_PKTS 0x4 /* enum */
+#define          MC_CMD_MAC_TX_MULTICAST_PKTS 0x5 /* enum */
+#define          MC_CMD_MAC_TX_BROADCAST_PKTS 0x6 /* enum */
+#define          MC_CMD_MAC_TX_BYTES 0x7 /* enum */
+#define          MC_CMD_MAC_TX_BAD_BYTES 0x8 /* enum */
+#define          MC_CMD_MAC_TX_LT64_PKTS 0x9 /* enum */
+#define          MC_CMD_MAC_TX_64_PKTS 0xa /* enum */
+#define          MC_CMD_MAC_TX_65_TO_127_PKTS 0xb /* enum */
+#define          MC_CMD_MAC_TX_128_TO_255_PKTS 0xc /* enum */
+#define          MC_CMD_MAC_TX_256_TO_511_PKTS 0xd /* enum */
+#define          MC_CMD_MAC_TX_512_TO_1023_PKTS 0xe /* enum */
+#define          MC_CMD_MAC_TX_1024_TO_15XX_PKTS 0xf /* enum */
+#define          MC_CMD_MAC_TX_15XX_TO_JUMBO_PKTS 0x10 /* enum */
+#define          MC_CMD_MAC_TX_GTJUMBO_PKTS 0x11 /* enum */
+#define          MC_CMD_MAC_TX_BAD_FCS_PKTS 0x12 /* enum */
+#define          MC_CMD_MAC_TX_SINGLE_COLLISION_PKTS 0x13 /* enum */
+#define          MC_CMD_MAC_TX_MULTIPLE_COLLISION_PKTS 0x14 /* enum */
+#define          MC_CMD_MAC_TX_EXCESSIVE_COLLISION_PKTS 0x15 /* enum */
+#define          MC_CMD_MAC_TX_LATE_COLLISION_PKTS 0x16 /* enum */
+#define          MC_CMD_MAC_TX_DEFERRED_PKTS 0x17 /* enum */
+#define          MC_CMD_MAC_TX_EXCESSIVE_DEFERRED_PKTS 0x18 /* enum */
+#define          MC_CMD_MAC_TX_NON_TCPUDP_PKTS 0x19 /* enum */
+#define          MC_CMD_MAC_TX_MAC_SRC_ERR_PKTS 0x1a /* enum */
+#define          MC_CMD_MAC_TX_IP_SRC_ERR_PKTS 0x1b /* enum */
+#define          MC_CMD_MAC_RX_PKTS 0x1c /* enum */
+#define          MC_CMD_MAC_RX_PAUSE_PKTS 0x1d /* enum */
+#define          MC_CMD_MAC_RX_GOOD_PKTS 0x1e /* enum */
+#define          MC_CMD_MAC_RX_CONTROL_PKTS 0x1f /* enum */
+#define          MC_CMD_MAC_RX_UNICAST_PKTS 0x20 /* enum */
+#define          MC_CMD_MAC_RX_MULTICAST_PKTS 0x21 /* enum */
+#define          MC_CMD_MAC_RX_BROADCAST_PKTS 0x22 /* enum */
+#define          MC_CMD_MAC_RX_BYTES 0x23 /* enum */
+#define          MC_CMD_MAC_RX_BAD_BYTES 0x24 /* enum */
+#define          MC_CMD_MAC_RX_64_PKTS 0x25 /* enum */
+#define          MC_CMD_MAC_RX_65_TO_127_PKTS 0x26 /* enum */
+#define          MC_CMD_MAC_RX_128_TO_255_PKTS 0x27 /* enum */
+#define          MC_CMD_MAC_RX_256_TO_511_PKTS 0x28 /* enum */
+#define          MC_CMD_MAC_RX_512_TO_1023_PKTS 0x29 /* enum */
+#define          MC_CMD_MAC_RX_1024_TO_15XX_PKTS 0x2a /* enum */
+#define          MC_CMD_MAC_RX_15XX_TO_JUMBO_PKTS 0x2b /* enum */
+#define          MC_CMD_MAC_RX_GTJUMBO_PKTS 0x2c /* enum */
+#define          MC_CMD_MAC_RX_UNDERSIZE_PKTS 0x2d /* enum */
+#define          MC_CMD_MAC_RX_BAD_FCS_PKTS 0x2e /* enum */
+#define          MC_CMD_MAC_RX_OVERFLOW_PKTS 0x2f /* enum */
+#define          MC_CMD_MAC_RX_FALSE_CARRIER_PKTS 0x30 /* enum */
+#define          MC_CMD_MAC_RX_SYMBOL_ERROR_PKTS 0x31 /* enum */
+#define          MC_CMD_MAC_RX_ALIGN_ERROR_PKTS 0x32 /* enum */
+#define          MC_CMD_MAC_RX_LENGTH_ERROR_PKTS 0x33 /* enum */
+#define          MC_CMD_MAC_RX_INTERNAL_ERROR_PKTS 0x34 /* enum */
+#define          MC_CMD_MAC_RX_JABBER_PKTS 0x35 /* enum */
+#define          MC_CMD_MAC_RX_NODESC_DROPS 0x36 /* enum */
+#define          MC_CMD_MAC_RX_LANES01_CHAR_ERR 0x37 /* enum */
+#define          MC_CMD_MAC_RX_LANES23_CHAR_ERR 0x38 /* enum */
+#define          MC_CMD_MAC_RX_LANES01_DISP_ERR 0x39 /* enum */
+#define          MC_CMD_MAC_RX_LANES23_DISP_ERR 0x3a /* enum */
+#define          MC_CMD_MAC_RX_MATCH_FAULT 0x3b /* enum */
+/* enum: PM trunc_bb_overflow counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_BB_OVERFLOW 0x3c
+/* enum: PM discard_bb_overflow counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_BB_OVERFLOW 0x3d
+/* enum: PM trunc_vfifo_full counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_VFIFO_FULL 0x3e
+/* enum: PM discard_vfifo_full counter. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_VFIFO_FULL 0x3f
+/* enum: PM trunc_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_TRUNC_QBB 0x40
+/* enum: PM discard_qbb counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_QBB 0x41
+/* enum: PM discard_mapping counter. Valid for EF10 with PM_AND_RXDP_COUNTERS
+ * capability only.
+ */
+#define          MC_CMD_MAC_PM_DISCARD_MAPPING 0x42
+/* enum: RXDP counter: Number of packets dropped due to the queue being
+ * disabled. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_Q_DISABLED_PKTS 0x43
+/* enum: RXDP counter: Number of packets dropped by the DICPU. Valid for EF10
+ * with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_DI_DROPPED_PKTS 0x45
+/* enum: RXDP counter: Number of non-host packets. Valid for EF10 with
+ * PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_STREAMING_PKTS 0x46
+/* enum: RXDP counter: Number of times an hlb descriptor fetch was performed.
+ * Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
+/* enum: RXDP counter: Number of times the DPCPU waited for an existing
+ * descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define          MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define          MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define          MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
+/* enum: Start of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_START 0x40
+/* enum: End of GMAC stats buffer space, for Siena only. */
+#define          MC_CMD_GMAC_DMABUF_END 0x5f
+/* enum: GENERATION_END value, used together with GENERATION_START to verify
+ * consistency of DMAed data. For legacy firmware / drivers without extended
+ * stats (more precisely, when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t)), this entry holds the GENERATION_END value. Otherwise,
+ * this value is invalid/reserved and GENERATION_END is written as the last
+ * 64-bit word of the DMA buffer (at DMA_LEN - sizeof(uint64_t)). Note that
+ * this is consistent with the legacy behaviour, in the sense that entry 96 is
+ * the last 64-bit word in the buffer when DMA_LEN == MC_CMD_MAC_NSTATS *
+ * sizeof(uint64_t). See SF-109306-TC, Section 9.2 for details.
+ */
+#define          MC_CMD_MAC_GENERATION_END 0x60
+#define          MC_CMD_MAC_NSTATS 0x61 /* enum */
+
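+/* Consistency-check sketch (illustrative only): reading a DMAed statistics
+ * buffer using GENERATION_START/GENERATION_END as described above, assuming
+ * the legacy layout of MC_CMD_MAC_NSTATS little-endian 64-bit words where
+ * GENERATION_END is the last word. Callers typically retry if this returns
+ * false.
+ *
+ *	static bool copy_mac_stats_snapshot(const __le64 *dma_buf, __le64 *dest)
+ *	{
+ *		__le64 gen_start, gen_end;
+ *
+ *		gen_end = READ_ONCE(dma_buf[MC_CMD_MAC_GENERATION_END]);
+ *		rmb();	// copy the stats only after sampling GENERATION_END
+ *		memcpy(dest, dma_buf, MC_CMD_MAC_NSTATS * sizeof(__le64));
+ *		rmb();	// sample GENERATION_START only after the copy
+ *		gen_start = READ_ONCE(dma_buf[MC_CMD_MAC_GENERATION_START]);
+ *		return gen_start == gen_end;
+ *	}
+ */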
+/* MC_CMD_MAC_STATS_V2_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V2_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V2_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V2*64))>>3)
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V2_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V2
+/* enum: Start of FEC stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_FEC_DMABUF_START 0x61
+/* enum: Number of uncorrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_UNCORRECTED_ERRORS 0x61
+/* enum: Number of corrected FEC codewords on link (RS-FEC only for Medford2)
+ */
+#define          MC_CMD_MAC_FEC_CORRECTED_ERRORS 0x62
+/* enum: Number of corrected 10-bit symbol errors, lane 0 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE0 0x63
+/* enum: Number of corrected 10-bit symbol errors, lane 1 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE1 0x64
+/* enum: Number of corrected 10-bit symbol errors, lane 2 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE2 0x65
+/* enum: Number of corrected 10-bit symbol errors, lane 3 (RS-FEC only) */
+#define          MC_CMD_MAC_FEC_CORRECTED_SYMBOLS_LANE3 0x66
+/* enum: This includes the space at offset 103 which is the final
+ * GENERATION_END in a MAC_STATS_V2 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V2 0x68
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V3_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V3_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V3_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V3*64))>>3)
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V3_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V3
+/* enum: Start of CTPIO stats buffer space, Medford2 and up */
+#define          MC_CMD_MAC_CTPIO_DMABUF_START 0x68
+/* enum: Number of CTPIO fallbacks because a DMA packet was in progress on the
+ * target VI
+ */
+#define          MC_CMD_MAC_CTPIO_VI_BUSY_FALLBACK 0x68
+/* enum: Number of times a CTPIO send wrote beyond frame end (informational
+ * only)
+ */
+#define          MC_CMD_MAC_CTPIO_LONG_WRITE_SUCCESS 0x69
+/* enum: Number of CTPIO failures because the TX doorbell was written before
+ * the end of the frame data
+ */
+#define          MC_CMD_MAC_CTPIO_MISSING_DBELL_FAIL 0x6a
+/* enum: Number of CTPIO failures because the internal FIFO overflowed */
+#define          MC_CMD_MAC_CTPIO_OVERFLOW_FAIL 0x6b
+/* enum: Number of CTPIO failures because the host did not deliver data fast
+ * enough to avoid MAC underflow
+ */
+#define          MC_CMD_MAC_CTPIO_UNDERFLOW_FAIL 0x6c
+/* enum: Number of CTPIO failures because the host did not deliver all the
+ * frame data within the timeout
+ */
+#define          MC_CMD_MAC_CTPIO_TIMEOUT_FAIL 0x6d
+/* enum: Number of CTPIO failures because the frame data arrived out of order
+ * or with gaps
+ */
+#define          MC_CMD_MAC_CTPIO_NONCONTIG_WR_FAIL 0x6e
+/* enum: Number of CTPIO failures because the host started a new frame before
+ * completing the previous one
+ */
+#define          MC_CMD_MAC_CTPIO_FRM_CLOBBER_FAIL 0x6f
+/* enum: Number of CTPIO failures because a write was not a multiple of 32 bits
+ * or not 32-bit aligned
+ */
+#define          MC_CMD_MAC_CTPIO_INVALID_WR_FAIL 0x70
+/* enum: Number of CTPIO fallbacks because another VI on the same port was
+ * sending a CTPIO frame
+ */
+#define          MC_CMD_MAC_CTPIO_VI_CLOBBER_FALLBACK 0x71
+/* enum: Number of CTPIO fallbacks because target VI did not have CTPIO enabled
+ */
+#define          MC_CMD_MAC_CTPIO_UNQUALIFIED_FALLBACK 0x72
+/* enum: Number of CTPIO fallbacks because length in header was less than 29
+ * bytes
+ */
+#define          MC_CMD_MAC_CTPIO_RUNT_FALLBACK 0x73
+/* enum: Total number of successful CTPIO sends on this port */
+#define          MC_CMD_MAC_CTPIO_SUCCESS 0x74
+/* enum: Total number of CTPIO fallbacks on this port */
+#define          MC_CMD_MAC_CTPIO_FALLBACK 0x75
+/* enum: Total number of CTPIO poisoned frames on this port, whether erased or
+ * not
+ */
+#define          MC_CMD_MAC_CTPIO_POISON 0x76
+/* enum: Total number of CTPIO erased frames on this port */
+#define          MC_CMD_MAC_CTPIO_ERASE 0x77
+/* enum: This includes the space at offset 120 which is the final
+ * GENERATION_END in a MAC_STATS_V3 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V3 0x79
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V2_OUT_NO_DMA/STATISTICS */
+
+/* MC_CMD_MAC_STATS_V4_OUT_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V4_OUT_DMA_LEN 0
+
+/* MC_CMD_MAC_STATS_V4_OUT_NO_DMA msgresponse */
+#define    MC_CMD_MAC_STATS_V4_OUT_NO_DMA_LEN (((MC_CMD_MAC_NSTATS_V4*64))>>3)
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_OFST 0
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LEN 8
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_LO_OFST 0
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_HI_OFST 4
+#define       MC_CMD_MAC_STATS_V4_OUT_NO_DMA_STATISTICS_NUM MC_CMD_MAC_NSTATS_V4
+/* enum: Start of V4 stats buffer space */
+#define          MC_CMD_MAC_V4_DMABUF_START 0x79
+/* enum: RXDP counter: Number of packets truncated because scattering was
+ * disabled.
+ */
+#define          MC_CMD_MAC_RXDP_SCATTER_DISABLED_TRUNC 0x79
+/* enum: RXDP counter: Number of times the RXDP head of line blocked waiting
+ * for descriptors. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_IDLE 0x7a
+/* enum: RXDP counter: Number of times the RXDP timed out while head of line
+ * blocking. Will be zero unless RXDP_HLB_IDLE capability is set.
+ */
+#define          MC_CMD_MAC_RXDP_HLB_TIMEOUT 0x7b
+/* enum: This includes the space at offset 124 which is the final
+ * GENERATION_END in a MAC_STATS_V4 response and otherwise unused.
+ */
+#define          MC_CMD_MAC_NSTATS_V4 0x7d
+/*            Other enum values, see field(s): */
+/*               MC_CMD_MAC_STATS_V3_OUT_NO_DMA/STATISTICS */
+
+
+/***********************************/
+/* MC_CMD_SRIOV
+ * to be documented
+ */
+#define MC_CMD_SRIOV 0x30
+
+/* MC_CMD_SRIOV_IN msgrequest */
+#define    MC_CMD_SRIOV_IN_LEN 12
+#define       MC_CMD_SRIOV_IN_ENABLE_OFST 0
+#define       MC_CMD_SRIOV_IN_ENABLE_LEN 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_OFST 4
+#define       MC_CMD_SRIOV_IN_VI_BASE_LEN 4
+#define       MC_CMD_SRIOV_IN_VF_COUNT_OFST 8
+#define       MC_CMD_SRIOV_IN_VF_COUNT_LEN 4
+
+/* MC_CMD_SRIOV_OUT msgresponse */
+#define    MC_CMD_SRIOV_OUT_LEN 8
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_OFST 0
+#define       MC_CMD_SRIOV_OUT_VI_SCALE_LEN 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_OFST 4
+#define       MC_CMD_SRIOV_OUT_VF_TOTAL_LEN 4
+
+/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
+#define    MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LEN 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LEN 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_LBN 32
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_WIDTH 32
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_OFST 8
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LEN 8
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST 8
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST 12
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LBN 64
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_WIDTH 64
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST 16
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LEN 4
+#define          MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE 0x100 /* enum */
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_LBN 128
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_WIDTH 32
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_OFST 20
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LEN 8
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST 20
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST 24
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LBN 160
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_WIDTH 64
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST 28
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LEN 4
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_LBN 224
+#define       MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_MEMCPY
+ * DMA write data into (Rid,Addr), either by dma reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a dma
+ * update of a datastructure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * requests record, which is ignored in all but the first structure)
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It is the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
+ */
+#define MC_CMD_MEMCPY 0x31
+
+/* MC_CMD_MEMCPY_IN msgrequest */
+#define    MC_CMD_MEMCPY_IN_LENMIN 32
+#define    MC_CMD_MEMCPY_IN_LENMAX 224
+#define    MC_CMD_MEMCPY_IN_LENMAX_MCDI2 992
+#define    MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+#define    MC_CMD_MEMCPY_IN_RECORD_NUM(len) (((len)-0)/32)
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
+#define       MC_CMD_MEMCPY_IN_RECORD_OFST 0
+#define       MC_CMD_MEMCPY_IN_RECORD_LEN 32
+#define       MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
+#define       MC_CMD_MEMCPY_IN_RECORD_MAXNUM 7
+#define       MC_CMD_MEMCPY_IN_RECORD_MAXNUM_MCDI2 31
+
+/* MC_CMD_MEMCPY_OUT msgresponse */
+#define    MC_CMD_MEMCPY_OUT_LEN 0
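+
+/*
+ * Illustrative sketch: one way a caller might lay out a single MC_CMD_MEMCPY
+ * record with inline source data, following the FROM_RID=RID_INLINE
+ * convention described above (the data is placed straight after the record
+ * and FROM_ADDR_LO carries its offset from the start of the request). The
+ * helper name and the raw little-endian stores are assumptions for
+ * illustration only, not part of the protocol definition.
+ */
+static inline size_t mc_cmd_memcpy_build_inline(u8 *buf, u32 to_rid,
+                                                u64 to_addr,
+                                                const void *data, u32 len)
+{
+        u32 data_ofst = MC_CMD_MEMCPY_IN_RECORD_LEN;   /* data follows the record */
+
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST) =
+                cpu_to_le32(1);
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_RID_OFST) =
+                cpu_to_le32(to_rid);
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO_OFST) =
+                cpu_to_le32(lower_32_bits(to_addr));
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI_OFST) =
+                cpu_to_le32(upper_32_bits(to_addr));
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_RID_OFST) =
+                cpu_to_le32(MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE);
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO_OFST) =
+                cpu_to_le32(data_ofst);
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI_OFST) =
+                cpu_to_le32(0);
+        *(__le32 *)(buf + MC_CMD_MEMCPY_RECORD_TYPEDEF_LENGTH_OFST) =
+                cpu_to_le32(len);
+        memcpy(buf + data_ofst, data, len);
+
+        return data_ofst + len;                        /* total request length */
+}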
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_SET
+ * Set a WoL filter.
+ */
+#define MC_CMD_WOL_FILTER_SET 0x32
+#undef MC_CMD_0x32_PRIVILEGE_CTG
+
+#define MC_CMD_0x32_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_SET_IN msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_LEN 192
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4
+#define          MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
+#define          MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
+#define       MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4
+/* enum: Magic */
+#define          MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define          MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define          MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define          MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define          MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define          MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define          MC_CMD_WOL_TYPE_MAX 0x7
+#define       MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
+#define       MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
+
+/* MC_CMD_WOL_FILTER_SET_IN_MAGIC msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_MAGIC_LEN 16
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LEN 8
+#define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_LO_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_MAGIC_MAC_HI_OFST 12
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_LEN 20
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_IP_LEN 4
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_OFST 12
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_IP_LEN 4
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_OFST 16
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_SRC_PORT_LEN 2
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_OFST 18
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV4_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_LEN 44
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_IP_LEN 16
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_OFST 24
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_IP_LEN 16
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_OFST 40
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_SRC_PORT_LEN 2
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_OFST 42
+#define       MC_CMD_WOL_FILTER_SET_IN_IPV6_SYN_DST_PORT_LEN 2
+
+/* MC_CMD_WOL_FILTER_SET_IN_BITMAP msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN 187
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_MASK_LEN 48
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_OFST 56
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_BITMAP_LEN 128
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_OFST 184
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LEN_LEN 1
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_OFST 185
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER3_LEN 1
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST 186
+#define       MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_LEN 1
+
+/* MC_CMD_WOL_FILTER_SET_IN_LINK msgrequest */
+#define    MC_CMD_WOL_FILTER_SET_IN_LINK_LEN 12
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0 */
+/*            MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_LEN 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4 */
+/*            MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_LEN 4 */
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST 8
+#define       MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_LEN 4
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_OFST 8
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_OFST 8
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define        MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
+/* MC_CMD_WOL_FILTER_SET_OUT msgresponse */
+#define    MC_CMD_WOL_FILTER_SET_OUT_LEN 4
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_LEN 4
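+
+/*
+ * Illustrative sketch: filling in a MC_CMD_WOL_FILTER_SET_IN_LINK request
+ * that wakes on link-up, using the field layout above. The FILTER_ID
+ * returned in the response identifies the filter for later removal. The
+ * helper name and the raw little-endian stores are assumptions for
+ * illustration only.
+ */
+static inline void mc_cmd_wol_filter_set_link_up(u8 *buf)
+{
+        memset(buf, 0, MC_CMD_WOL_FILTER_SET_IN_LINK_LEN);
+        *(__le32 *)(buf + MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST) =
+                cpu_to_le32(MC_CMD_FILTER_MODE_SIMPLE);
+        *(__le32 *)(buf + MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST) =
+                cpu_to_le32(MC_CMD_WOL_TYPE_LINK);
+        /* LINK_MASK dword at offset 8: bit 0 = wake on link up */
+        *(__le32 *)(buf + MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST) =
+                cpu_to_le32(1u << MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN);
+}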
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_REMOVE
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_REMOVE 0x33
+#undef MC_CMD_0x33_PRIVILEGE_CTG
+
+#define MC_CMD_0x33_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_REMOVE_IN msgrequest */
+#define    MC_CMD_WOL_FILTER_REMOVE_IN_LEN 4
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_REMOVE_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_WOL_FILTER_REMOVE_OUT msgresponse */
+#define    MC_CMD_WOL_FILTER_REMOVE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_RESET
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_RESET 0x34
+#undef MC_CMD_0x34_PRIVILEGE_CTG
+
+#define MC_CMD_0x34_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_RESET_IN msgrequest */
+#define    MC_CMD_WOL_FILTER_RESET_IN_LEN 4
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_OFST 0
+#define       MC_CMD_WOL_FILTER_RESET_IN_MASK_LEN 4
+#define          MC_CMD_WOL_FILTER_RESET_IN_WAKE_FILTERS 0x1 /* enum */
+#define          MC_CMD_WOL_FILTER_RESET_IN_LIGHTSOUT_OFFLOADS 0x2 /* enum */
+
+/* MC_CMD_WOL_FILTER_RESET_OUT msgresponse */
+#define    MC_CMD_WOL_FILTER_RESET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SET_MCAST_HASH
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
+ */
+#define MC_CMD_SET_MCAST_HASH 0x35
+
+/* MC_CMD_SET_MCAST_HASH_IN msgrequest */
+#define    MC_CMD_SET_MCAST_HASH_IN_LEN 32
+#define       MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST 0
+#define       MC_CMD_SET_MCAST_HASH_IN_HASH0_LEN 16
+#define       MC_CMD_SET_MCAST_HASH_IN_HASH1_OFST 16
+#define       MC_CMD_SET_MCAST_HASH_IN_HASH1_LEN 16
+
+/* MC_CMD_SET_MCAST_HASH_OUT msgresponse */
+#define    MC_CMD_SET_MCAST_HASH_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_TYPES
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
+ */
+#define MC_CMD_NVRAM_TYPES 0x36
+#undef MC_CMD_0x36_PRIVILEGE_CTG
+
+#define MC_CMD_0x36_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TYPES_IN msgrequest */
+#define    MC_CMD_NVRAM_TYPES_IN_LEN 0
+
+/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
+#define    MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
+#define       MC_CMD_NVRAM_TYPES_OUT_TYPES_LEN 4
+/* enum: Disabled callisto. */
+#define          MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define          MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define          MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define          MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define          MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define          MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define          MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define          MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define          MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define          MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define          MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define          MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define          MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define          MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define          MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define          MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define          MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define          MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define          MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define          MC_CMD_NVRAM_TYPE_FC_LOG 0x13
+/* enum: Additional flash on FPGA. */
+#define          MC_CMD_NVRAM_TYPE_FC_EXTRA 0x14
+
+
+/***********************************/
+/* MC_CMD_NVRAM_INFO
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_INFO 0x37
+#undef MC_CMD_0x37_PRIVILEGE_CTG
+
+#define MC_CMD_0x37_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_INFO_IN msgrequest */
+#define    MC_CMD_NVRAM_INFO_IN_LEN 4
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_INFO_OUT msgresponse */
+#define    MC_CMD_NVRAM_INFO_OUT_LEN 24
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_OUT_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_OUT_SIZE_LEN 4
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_OUT_ERASESIZE_LEN 4
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
+#define        MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_TLV_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define        MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_LBN 3
+#define        MC_CMD_NVRAM_INFO_OUT_CRC_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_OUT_READ_ONLY_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_CMAC_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_CMAC_LBN 6
+#define        MC_CMD_NVRAM_INFO_OUT_CMAC_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_OUT_A_B_OFST 12
+#define        MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define        MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSDEV_LEN 4
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_OUT_PHYSADDR_LEN 4
+
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define    MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_INFO_V2_OUT_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_SIZE_LEN 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define       MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_LEN 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define       MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_OFST 12
+#define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define        MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_OFST 12
+#define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_OFST 12
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_LBN 2
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_IF_TSA_BOUND_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_OFST 12
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_LBN 5
+#define        MC_CMD_NVRAM_INFO_V2_OUT_READ_ONLY_WIDTH 1
+#define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_OFST 12
+#define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define        MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_LEN 4
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+#define       MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_LEN 4
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+#define       MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_START
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held). In an adapter bound to a TSA controller,
+ * MC_CMD_NVRAM_UPDATE_START can only be used on a subset of partition types
+ * i.e. static config, dynamic config and expansion ROM config. Attempting to
+ * perform this operation on a restricted partition will return the error
+ * EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_START 0x38
+#undef MC_CMD_0x38_PRIVILEGE_CTG
+
+#define MC_CMD_0x38_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_START_IN msgrequest: Legacy NVRAM_UPDATE_START request.
+ * Use NVRAM_UPDATE_START_V2_IN in new code
+ */
+#define    MC_CMD_NVRAM_UPDATE_START_IN_LEN 4
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_UPDATE_START_V2_IN msgrequest: Extended NVRAM_UPDATE_START
+ * request with additional flags indicating version of command in use. See
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended functionality. Use
+ * paired up with NVRAM_UPDATE_FINISH_V2_IN.
+ */
+#define    MC_CMD_NVRAM_UPDATE_START_V2_IN_LEN 8
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 4
+#define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define        MC_CMD_NVRAM_UPDATE_START_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_START_OUT msgresponse */
+#define    MC_CMD_NVRAM_UPDATE_START_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_READ
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_READ 0x39
+#undef MC_CMD_0x39_PRIVILEGE_CTG
+
+#define MC_CMD_0x39_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_READ_IN msgrequest */
+#define    MC_CMD_NVRAM_READ_IN_LEN 12
+#define       MC_CMD_NVRAM_READ_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_OFFSET_LEN 4
+/* amount to read in bytes */
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define    MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+#define       MC_CMD_NVRAM_READ_IN_V2_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_READ_IN_V2_OFFSET_LEN 4
+/* amount to read in bytes */
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_READ_IN_V2_LENGTH_LEN 4
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+#define       MC_CMD_NVRAM_READ_IN_V2_MODE_LEN 4
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define          MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define          MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define          MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
+/* MC_CMD_NVRAM_READ_OUT msgresponse */
+#define    MC_CMD_NVRAM_READ_OUT_LENMIN 1
+#define    MC_CMD_NVRAM_READ_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_READ_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_NVRAM_READ_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_NVRAM_READ_OUT_READ_BUFFER_NUM(len) (((len)-0)/1)
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_LEN 1
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM 252
+#define       MC_CMD_NVRAM_READ_OUT_READ_BUFFER_MAXNUM_MCDI2 1020
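+
+/*
+ * Illustrative sketch: filling in a MC_CMD_NVRAM_READ_IN_V2 request. MODE is
+ * one of the TARGET_* values above, e.g. TARGET_BACKUP to verify the
+ * partition being rewritten while the write lock is held, as described in
+ * the MODE field comment. The helper name and the raw little-endian stores
+ * are assumptions for illustration only.
+ */
+static inline void mc_cmd_nvram_read_v2_build(u8 *buf, u32 type, u32 offset,
+                                              u32 length, u32 mode)
+{
+        *(__le32 *)(buf + MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST) = cpu_to_le32(type);
+        *(__le32 *)(buf + MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST) = cpu_to_le32(offset);
+        *(__le32 *)(buf + MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST) = cpu_to_le32(length);
+        *(__le32 *)(buf + MC_CMD_NVRAM_READ_IN_V2_MODE_OFST) = cpu_to_le32(mode);
+}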
+
+
+/***********************************/
+/* MC_CMD_NVRAM_WRITE
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_WRITE 0x3a
+#undef MC_CMD_0x3a_PRIVILEGE_CTG
+
+#define MC_CMD_0x3a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_WRITE_IN msgrequest */
+#define    MC_CMD_NVRAM_WRITE_IN_LENMIN 13
+#define    MC_CMD_NVRAM_WRITE_IN_LENMAX 252
+#define    MC_CMD_NVRAM_WRITE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_NVRAM_WRITE_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_NUM(len) (((len)-12)/1)
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_WRITE_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_WRITE_IN_OFFSET_LEN 4
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_WRITE_IN_LENGTH_LEN 4
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_LEN 1
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM 240
+#define       MC_CMD_NVRAM_WRITE_IN_WRITE_BUFFER_MAXNUM_MCDI2 1008
+
+/* MC_CMD_NVRAM_WRITE_OUT msgresponse */
+#define    MC_CMD_NVRAM_WRITE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_ERASE
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
+ */
+#define MC_CMD_NVRAM_ERASE 0x3b
+#undef MC_CMD_0x3b_PRIVILEGE_CTG
+
+#define MC_CMD_0x3b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_ERASE_IN msgrequest */
+#define    MC_CMD_NVRAM_ERASE_IN_LEN 12
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_ERASE_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_OFST 4
+#define       MC_CMD_NVRAM_ERASE_IN_OFFSET_LEN 4
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_OFST 8
+#define       MC_CMD_NVRAM_ERASE_IN_LENGTH_LEN 4
+
+/* MC_CMD_NVRAM_ERASE_OUT msgresponse */
+#define    MC_CMD_NVRAM_ERASE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_UPDATE_FINISH
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type/offset/
+ * length), EACCES (if PHY_LOCK required and not held). In an adapter bound to
+ * a TSA controller, MC_CMD_NVRAM_UPDATE_FINISH can only be used on a subset of
+ * partition types i.e. static config, dynamic config and expansion ROM config.
+ * Attempting to perform this operation on a restricted partition will return
+ * the error EPERM.
+ */
+#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
+#undef MC_CMD_0x3c_PRIVILEGE_CTG
+
+#define MC_CMD_0x3c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_IN msgrequest: Legacy NVRAM_UPDATE_FINISH
+ * request. Use NVRAM_UPDATE_FINISH_V2_IN in new code
+ */
+#define    MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_LEN 4
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_IN msgrequest: Extended NVRAM_UPDATE_FINISH
+ * request with additional flags indicating version of NVRAM_UPDATE commands in
+ * use. See MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT for details of extended
+ * functionality. Use paired up with NVRAM_UPDATE_START_V2_IN.
+ */
+#define    MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN 12
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_OFST 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_REBOOT_LEN 4
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST 8
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_OFST 8
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN 0
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_OFST 8
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_WIDTH 1
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_OFST 8
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN 2
+#define        MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_WIDTH 1
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_OUT msgresponse: Legacy NVRAM_UPDATE_FINISH
+ * response. Use NVRAM_UPDATE_FINISH_V2_OUT in new code
+ */
+#define    MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
+
+/* MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT msgresponse:
+ *
+ * Extended NVRAM_UPDATE_FINISH response that communicates the result of secure
+ * firmware validation where applicable back to the host.
+ *
+ * Medford only: For signed firmware images, such as those for medford, the MC
+ * firmware verifies the signature before marking the firmware image as valid.
+ * This process takes a few seconds to complete, so it is likely to exceed the
+ * MCDI timeout. Hence signature verification is initiated when
+ * MC_CMD_NVRAM_UPDATE_FINISH_V2_IN is received by the firmware; however, the
+ * MCDI command is run in a background MCDI processing thread. This response
+ * payload includes the results of the signature verification. Note that the
+ * per-partition nvram lock in firmware is only released after the verification
+ * has completed.
+ */
+#define    MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN 4
+/* Result of nvram update completion processing. Result codes that indicate an
+ * internal build failure, and are therefore not expected to be seen by
+ * customers in the field, are marked with the prefix 'Internal-error'.
+ */
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST 0
+#define       MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_LEN 4
+/* enum: Invalid return code; only non-zero values are defined. Defined as
+ * unknown for backwards compatibility with NVRAM_UPDATE_FINISH_OUT.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_UNKNOWN 0x0
+/* enum: Verify succeeded without any errors. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SUCCESS 0x1
+/* enum: CMS format verification failed due to an internal error. */
+#define          MC_CMD_NVRAM_VERIFY_RC_CMS_CHECK_FAILED 0x2
+/* enum: Invalid CMS format in image metadata. */
+#define          MC_CMD_NVRAM_VERIFY_RC_INVALID_CMS_FORMAT 0x3
+/* enum: Message digest verification failed due to an internal error. */
+#define          MC_CMD_NVRAM_VERIFY_RC_MESSAGE_DIGEST_CHECK_FAILED 0x4
+/* enum: Error in message digest calculated over the reflash-header, payload
+ * and reflash-trailer.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BAD_MESSAGE_DIGEST 0x5
+/* enum: Signature verification failed due to an internal error. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHECK_FAILED 0x6
+/* enum: There are no valid signatures in the image. */
+#define          MC_CMD_NVRAM_VERIFY_RC_NO_VALID_SIGNATURES 0x7
+/* enum: Trusted approvers verification failed due to an internal error. */
+#define          MC_CMD_NVRAM_VERIFY_RC_TRUSTED_APPROVERS_CHECK_FAILED 0x8
+/* enum: The Trusted approver's list is empty. */
+#define          MC_CMD_NVRAM_VERIFY_RC_NO_TRUSTED_APPROVERS 0x9
+/* enum: Signature chain verification failed due to an internal error. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SIGNATURE_CHAIN_CHECK_FAILED 0xa
+/* enum: The signers of the signatures in the image are not listed in the
+ * Trusted approver's list.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_NO_SIGNATURE_MATCH 0xb
+/* enum: The image contains a test-signed certificate, but the adapter accepts
+ * only production signed images.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_REJECT_TEST_SIGNED 0xc
+/* enum: The image has a lower security level than the current firmware. */
+#define          MC_CMD_NVRAM_VERIFY_RC_SECURITY_LEVEL_DOWNGRADE 0xd
+/* enum: Internal-error. The signed image is missing the 'contents' section,
+ * where the 'contents' section holds the actual image payload to be applied.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_CONTENT_NOT_FOUND 0xe
+/* enum: Internal-error. The bundle header is invalid. */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_CONTENT_HEADER_INVALID 0xf
+/* enum: Internal-error. The bundle does not have a valid reflash image layout.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_REFLASH_IMAGE_INVALID 0x10
+/* enum: Internal-error. The bundle has an inconsistent layout of components or
+ * incorrect checksum.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_IMAGE_LAYOUT_INVALID 0x11
+/* enum: Internal-error. The bundle manifest is inconsistent with components in
+ * the bundle.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_INVALID 0x12
+/* enum: Internal-error. The number of components in a bundle do not match the
+ * number of components advertised by the bundle manifest.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_NUM_COMPONENTS_MISMATCH 0x13
+/* enum: Internal-error. The bundle contains too many components for the MC
+ * firmware to process.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_TOO_MANY_COMPONENTS 0x14
+/* enum: Internal-error. The bundle manifest has an invalid/inconsistent
+ * component.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_INVALID 0x15
+/* enum: Internal-error. The hash of a component does not match the hash stored
+ * in the bundle manifest.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_MISMATCH 0x16
+/* enum: Internal-error. Component hash calculation failed. */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_MANIFEST_COMPONENT_HASH_FAILED 0x17
+/* enum: Internal-error. The component does not have a valid reflash image
+ * layout.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_REFLASH_IMAGE_INVALID 0x18
+/* enum: The bundle processing code failed to copy a component to its target
+ * partition.
+ */
+#define          MC_CMD_NVRAM_VERIFY_RC_BUNDLE_COMPONENT_COPY_FAILED 0x19
+/* enum: The update operation is in-progress. */
+#define          MC_CMD_NVRAM_VERIFY_RC_PENDING 0x1a
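+
+/*
+ * Illustrative sketch: one plausible way for a host to request background
+ * verification and then collect the verify result, based on the
+ * RUN_IN_BACKGROUND/POLL_VERIFY_RESULT flags and the PENDING result code
+ * defined above. mcdi_rpc_fn is a hypothetical transport callback, not a
+ * real driver interface, and error handling is omitted; this is not the
+ * driver's implementation.
+ */
+typedef int (*mcdi_rpc_fn)(unsigned int cmd, const u8 *inbuf, size_t inlen,
+                           u8 *outbuf, size_t outlen);
+
+static inline u32 nvram_update_finish_and_poll(mcdi_rpc_fn rpc, u32 type)
+{
+        u8 in[MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_LEN] = { 0 };
+        u8 out[MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_LEN];
+        u32 result;
+
+        *(__le32 *)(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_TYPE_OFST) =
+                cpu_to_le32(type);
+
+        /* Ask for the verify result to be reported and run in the background... */
+        *(__le32 *)(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST) =
+                cpu_to_le32((1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_REPORT_VERIFY_RESULT_LBN) |
+                            (1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_RUN_IN_BACKGROUND_LBN));
+        rpc(MC_CMD_NVRAM_UPDATE_FINISH, in, sizeof(in), out, sizeof(out));
+
+        /* ...then poll until the result code is no longer PENDING. */
+        do {
+                *(__le32 *)(in + MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAGS_OFST) =
+                        cpu_to_le32(1u << MC_CMD_NVRAM_UPDATE_FINISH_V2_IN_FLAG_POLL_VERIFY_RESULT_LBN);
+                rpc(MC_CMD_NVRAM_UPDATE_FINISH, in, sizeof(in), out, sizeof(out));
+                result = le32_to_cpu(*(__le32 *)
+                        (out + MC_CMD_NVRAM_UPDATE_FINISH_V2_OUT_RESULT_CODE_OFST));
+        } while (result == MC_CMD_NVRAM_VERIFY_RC_PENDING);
+
+        return result;  /* MC_CMD_NVRAM_VERIFY_RC_SUCCESS for a good image */
+}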
+
+
+/***********************************/
+/* MC_CMD_REBOOT
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE. Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
+ */
+#define MC_CMD_REBOOT 0x3d
+#undef MC_CMD_0x3d_PRIVILEGE_CTG
+
+#define MC_CMD_0x3d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_REBOOT_IN msgrequest */
+#define    MC_CMD_REBOOT_IN_LEN 4
+#define       MC_CMD_REBOOT_IN_FLAGS_OFST 0
+#define       MC_CMD_REBOOT_IN_FLAGS_LEN 4
+#define          MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION 0x1 /* enum */
+
+/* MC_CMD_REBOOT_OUT msgresponse */
+#define    MC_CMD_REBOOT_OUT_LEN 0
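+
+/*
+ * Illustrative sketch: building the 4-byte MC_CMD_REBOOT request. As noted
+ * above, the MC does not reply normally to this command; the caller should
+ * expect an MCDI response with ERR=1 and DATALEN=0 and treat that as the
+ * expected outcome. The helper name and the raw little-endian store are
+ * assumptions for illustration only.
+ */
+static inline void mc_cmd_reboot_build(u8 *buf, bool after_assertion)
+{
+        *(__le32 *)(buf + MC_CMD_REBOOT_IN_FLAGS_OFST) =
+                cpu_to_le32(after_assertion ?
+                            MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0);
+}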
+
+
+/***********************************/
+/* MC_CMD_SCHEDINFO
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice, maximum overrun), one for each thread, in ascending order of
+ * thread address.
+ */
+#define MC_CMD_SCHEDINFO 0x3e
+#undef MC_CMD_0x3e_PRIVILEGE_CTG
+
+#define MC_CMD_0x3e_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SCHEDINFO_IN msgrequest */
+#define    MC_CMD_SCHEDINFO_IN_LEN 0
+
+/* MC_CMD_SCHEDINFO_OUT msgresponse */
+#define    MC_CMD_SCHEDINFO_OUT_LENMIN 4
+#define    MC_CMD_SCHEDINFO_OUT_LENMAX 252
+#define    MC_CMD_SCHEDINFO_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_SCHEDINFO_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_SCHEDINFO_OUT_DATA_NUM(len) (((len)-0)/4)
+#define       MC_CMD_SCHEDINFO_OUT_DATA_OFST 0
+#define       MC_CMD_SCHEDINFO_OUT_DATA_LEN 4
+#define       MC_CMD_SCHEDINFO_OUT_DATA_MINNUM 1
+#define       MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM 63
+#define       MC_CMD_SCHEDINFO_OUT_DATA_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
+ */
+#define MC_CMD_REBOOT_MODE 0x3f
+#undef MC_CMD_0x3f_PRIVILEGE_CTG
+
+#define MC_CMD_0x3f_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_REBOOT_MODE_IN msgrequest */
+#define    MC_CMD_REBOOT_MODE_IN_LEN 4
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_IN_VALUE_LEN 4
+/* enum: Normal. */
+#define          MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define          MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define          MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define          MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define        MC_CMD_REBOOT_MODE_IN_FAKE_OFST 0
+#define        MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define        MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
+
+/* MC_CMD_REBOOT_MODE_OUT msgresponse */
+#define    MC_CMD_REBOOT_MODE_OUT_LEN 4
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_OFST 0
+#define       MC_CMD_REBOOT_MODE_OUT_VALUE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SENSOR_INFO
+ * Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None. Returns: 0
+ */
+#define MC_CMD_SENSOR_INFO 0x41
+#undef MC_CMD_0x41_PRIVILEGE_CTG
+
+#define MC_CMD_0x41_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SENSOR_INFO_IN msgrequest */
+#define    MC_CMD_SENSOR_INFO_IN_LEN 0
+
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define    MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_PAGE_LEN 4
+
+/* MC_CMD_SENSOR_INFO_EXT_IN_V2 msgrequest */
+#define    MC_CMD_SENSOR_INFO_EXT_IN_V2_LEN 8
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit). etc.
+ */
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_PAGE_LEN 4
+/* Flags controlling information retrieved */
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_OFST 4
+#define       MC_CMD_SENSOR_INFO_EXT_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_OFST 4
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_LBN 0
+#define        MC_CMD_SENSOR_INFO_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_SENSOR_INFO_OUT msgresponse */
+#define    MC_CMD_SENSOR_INFO_OUT_LENMIN 4
+#define    MC_CMD_SENSOR_INFO_OUT_LENMAX 252
+#define    MC_CMD_SENSOR_INFO_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
+#define    MC_CMD_SENSOR_INFO_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_OUT_MASK_LEN 4
+/* enum: Controller temperature: degC */
+#define          MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define          MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define          MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define          MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define          MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define          MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define          MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define          MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define          MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define          MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define          MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define          MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define          MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define          MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define          MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define          MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define          MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define          MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define          MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define          MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define          MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define          MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define          MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define          MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define          MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define          MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define          MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define          MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define          MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define          MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define          MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define          MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define          MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define          MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define          MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define          MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define          MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* enum: Hotpoint temperature: degC */
+#define          MC_CMD_SENSOR_HOTPOINT_TEMP 0x2d
+/* enum: Port 0 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT0 0x2e
+/* enum: Port 1 PHY power switch over-current: bool */
+#define          MC_CMD_SENSOR_PHY_POWER_PORT1 0x2f
+/* enum: Mop-up microcontroller reference voltage: mV */
+#define          MC_CMD_SENSOR_MUM_VCC 0x30
+/* enum: 0.9v power phase A voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_A 0x31
+/* enum: 0.9v power phase A current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_A 0x32
+/* enum: 0.9V voltage regulator phase A temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_A_TEMP 0x33
+/* enum: 0.9v power phase B voltage: mV */
+#define          MC_CMD_SENSOR_IN_0V9_B 0x34
+/* enum: 0.9v power phase B current: mA */
+#define          MC_CMD_SENSOR_IN_I0V9_B 0x35
+/* enum: 0.9V voltage regulator phase B temperature: degC */
+#define          MC_CMD_SENSOR_VREG_0V9_B_TEMP 0x36
+/* enum: CCOM AVREG 1v2 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY 0x37
+/* enum: CCOM AVREG 1v2 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V2_SUPPLY_EXTADC 0x38
+/* enum: CCOM AVREG 1v8 supply (internal ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
+/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
+#define          MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define          MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE1_NEXT 0x3f
+/* enum: controller internal temperature sensor voltage on master core
+ * (internal ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT 0x40
+/* enum: controller internal temperature on master core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP 0x41
+/* enum: controller internal temperature sensor voltage on master core
+ * (external ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_VPTAT_EXTADC 0x42
+/* enum: controller internal temperature on master core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_MASTER_INTERNAL_TEMP_EXTADC 0x43
+/* enum: controller internal temperature on slave core sensor voltage (internal
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT 0x44
+/* enum: controller internal temperature on slave core (internal ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP 0x45
+/* enum: controller internal temperature on slave core sensor voltage (external
+ * ADC): mV
+ */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_VPTAT_EXTADC 0x46
+/* enum: controller internal temperature on slave core (external ADC): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_SLAVE_INTERNAL_TEMP_EXTADC 0x47
+/* enum: Voltage supplied to the SODIMMs from their power supply: mV */
+#define          MC_CMD_SENSOR_SODIMM_VOUT 0x49
+/* enum: Temperature of SODIMM 0 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_0_TEMP 0x4a
+/* enum: Temperature of SODIMM 1 (if installed): degC */
+#define          MC_CMD_SENSOR_SODIMM_1_TEMP 0x4b
+/* enum: Voltage supplied to the QSFP #0 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY0_VCC 0x4c
+/* enum: Voltage supplied to the QSFP #1 from its power supply: mV */
+#define          MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define          MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define          MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define          MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
+/* enum: 1.8v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V8 0x51
+/* enum: 2.5v power current: mA */
+#define          MC_CMD_SENSOR_IN_I2V5 0x52
+/* enum: 3.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I3V3 0x53
+/* enum: 12v power current: mA */
+#define          MC_CMD_SENSOR_IN_I12V0 0x54
+/* enum: 1.3v power: mV */
+#define          MC_CMD_SENSOR_IN_1V3 0x55
+/* enum: 1.3v power current: mA */
+#define          MC_CMD_SENSOR_IN_I1V3 0x56
+/* enum: Engineering sensor 1 */
+#define          MC_CMD_SENSOR_ENGINEERING_1 0x57
+/* enum: Engineering sensor 2 */
+#define          MC_CMD_SENSOR_ENGINEERING_2 0x58
+/* enum: Engineering sensor 3 */
+#define          MC_CMD_SENSOR_ENGINEERING_3 0x59
+/* enum: Engineering sensor 4 */
+#define          MC_CMD_SENSOR_ENGINEERING_4 0x5a
+/* enum: Engineering sensor 5 */
+#define          MC_CMD_SENSOR_ENGINEERING_5 0x5b
+/* enum: Engineering sensor 6 */
+#define          MC_CMD_SENSOR_ENGINEERING_6 0x5c
+/* enum: Engineering sensor 7 */
+#define          MC_CMD_SENSOR_ENGINEERING_7 0x5d
+/* enum: Engineering sensor 8 */
+#define          MC_CMD_SENSOR_ENGINEERING_8 0x5e
+/* enum: Not a sensor: reserved for the next page flag */
+#define          MC_CMD_SENSOR_PAGE2_NEXT 0x5f
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+#define       MC_CMD_SENSOR_ENTRY_OFST 4
+#define       MC_CMD_SENSOR_ENTRY_LEN 8
+#define       MC_CMD_SENSOR_ENTRY_LO_OFST 4
+#define       MC_CMD_SENSOR_ENTRY_HI_OFST 8
+#define       MC_CMD_SENSOR_ENTRY_MINNUM 0
+#define       MC_CMD_SENSOR_ENTRY_MAXNUM 31
+#define       MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127
+
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 4
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define    MC_CMD_SENSOR_INFO_EXT_OUT_MC_CMD_SENSOR_ENTRY_NUM(len) (((len)-4)/8)
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+#define       MC_CMD_SENSOR_INFO_EXT_OUT_MASK_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_SENSOR_INFO_OUT */
+#define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_OFST 0
+#define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define        MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/*            MC_CMD_SENSOR_ENTRY_OFST 4 */
+/*            MC_CMD_SENSOR_ENTRY_LEN 8 */
+/*            MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/*            MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/*            MC_CMD_SENSOR_ENTRY_MINNUM 0 */
+/*            MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+/*            MC_CMD_SENSOR_ENTRY_MAXNUM_MCDI2 127 */
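+
+/*
+ * Illustrative sketch: helpers for walking the paged sensor masks described
+ * under MC_CMD_SENSOR_INFO above. Bit 31 of each page's mask is the
+ * next-page flag, and bits 0..30 of page p correspond to sensor types
+ * 32 * p + 0 .. 32 * p + 30. The helper names are assumptions for
+ * illustration only.
+ */
+static inline bool mc_sensor_mask_has_next_page(u32 mask)
+{
+        return mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN);
+}
+
+static inline unsigned int mc_sensor_type_from_page_bit(u32 page,
+                                                        unsigned int bit)
+{
+        return page * 32 + bit;         /* valid for bit 0..30 */
+}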
+
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
+#define    MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LEN 2
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_LBN 0
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_WIDTH 16
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_OFST 2
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LEN 2
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_LBN 16
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX1_WIDTH 16
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_OFST 4
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LEN 2
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_LBN 32
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN2_WIDTH 16
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_OFST 6
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LEN 2
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_LBN 48
+#define       MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MAX2_WIDTH 16
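+
+/*
+ * Illustrative sketch: classifying a sensor reading against the two
+ * (min, max) ranges above, following the rules given in the
+ * MC_CMD_SENSOR_INFO description: an unused range has equal limits, a value
+ * outside the only (or the second) range is fatal, and a value outside the
+ * first range but inside the second is a warning. The return values are
+ * chosen to match the MC_CMD_SENSOR_STATE_* enums defined further down
+ * (0 = OK, 1 = WARNING, 2 = FATAL). The helper name and the use of
+ * already-decoded unsigned values are assumptions for illustration only.
+ */
+static inline int mc_sensor_classify(unsigned int value,
+                                     unsigned int min1, unsigned int max1,
+                                     unsigned int min2, unsigned int max2)
+{
+        bool range2_used = (min2 != max2);
+
+        if (value >= min1 && value <= max1)
+                return 0;                               /* OK */
+        if (range2_used && value >= min2 && value <= max2)
+                return 1;                               /* WARNING */
+        return 2;                                       /* FATAL */
+}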
+
+
+/***********************************/
+/* MC_CMD_READ_SENSORS
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then return
+ * EINVAL.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
+ */
+#define MC_CMD_READ_SENSORS 0x42
+#undef MC_CMD_0x42_PRIVILEGE_CTG
+
+#define MC_CMD_0x42_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_READ_SENSORS_IN msgrequest */
+#define    MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
+#define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define    MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_SENSORS_EXT_IN_V2 msgrequest */
+#define    MC_CMD_READ_SENSORS_EXT_IN_V2_LEN 16
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned).
+ *
+ * If the address is 0xffffffffffffffff send the readings in the response (used
+ * by cmdclient).
+ */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LEN 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_LO_OFST 0
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_OFST 8
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_LENGTH_LEN 4
+/* Flags controlling information retrieved */
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_OFST 12
+#define       MC_CMD_READ_SENSORS_EXT_IN_V2_FLAGS_LEN 4
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_OFST 12
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_LBN 0
+#define        MC_CMD_READ_SENSORS_EXT_IN_V2_ENGINEERING_WIDTH 1
+
+/* MC_CMD_READ_SENSORS_OUT msgresponse */
+#define    MC_CMD_READ_SENSORS_OUT_LEN 0
+
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define    MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
+/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
+#define    MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
+/* enum: Ok. */
+#define          MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define          MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define          MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define          MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define          MC_CMD_SENSOR_STATE_NO_READING 0x4
+/* enum: Sensor initialisation failed. */
+#define          MC_CMD_SENSOR_STATE_INIT_FAILED 0x5
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/*            Enum values, see field(s): */
+/*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define       MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
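+
+/*
+ * Illustrative sketch: unpacking one MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword
+ * from the buffer written by MC_CMD_READ_SENSORS, using the LBN/WIDTH layout
+ * above. 'entry' is the 32-bit little-endian value already converted to CPU
+ * byte order. The helper name is an assumption for illustration only.
+ */
+static inline void mc_sensor_value_entry_unpack(u32 entry, u16 *value,
+                                                u8 *state, u8 *type)
+{
+        *value = entry & 0xffff;        /* VALUE: bits 0..15 */
+        *state = (entry >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN) & 0xff;
+        *type = (entry >> MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN) & 0xff;
+}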
+
+
+/***********************************/
+/* MC_CMD_GET_PHY_STATE
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
+ */
+#define MC_CMD_GET_PHY_STATE 0x43
+#undef MC_CMD_0x43_PRIVILEGE_CTG
+
+#define MC_CMD_0x43_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PHY_STATE_IN msgrequest */
+#define    MC_CMD_GET_PHY_STATE_IN_LEN 0
+
+/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
+#define    MC_CMD_GET_PHY_STATE_OUT_LEN 4
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_PHY_STATE_OUT_STATE_LEN 4
+/* enum: Ok. */
+#define          MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define          MC_CMD_PHY_STATE_ZOMBIE 0x2
+
+
+/***********************************/
+/* MC_CMD_SETUP_8021QBB
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
+ */
+#define MC_CMD_SETUP_8021QBB 0x44
+
+/* MC_CMD_SETUP_8021QBB_IN msgrequest */
+#define    MC_CMD_SETUP_8021QBB_IN_LEN 32
+#define       MC_CMD_SETUP_8021QBB_IN_TXQS_OFST 0
+#define       MC_CMD_SETUP_8021QBB_IN_TXQS_LEN 32
+
+/* MC_CMD_SETUP_8021QBB_OUT msgresponse */
+#define    MC_CMD_SETUP_8021QBB_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_WOL_FILTER_GET
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_WOL_FILTER_GET 0x45
+#undef MC_CMD_0x45_PRIVILEGE_CTG
+
+#define MC_CMD_0x45_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_WOL_FILTER_GET_IN msgrequest */
+#define    MC_CMD_WOL_FILTER_GET_IN_LEN 0
+
+/* MC_CMD_WOL_FILTER_GET_OUT msgresponse */
+#define    MC_CMD_WOL_FILTER_GET_OUT_LEN 4
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_WOL_FILTER_GET_OUT_FILTER_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
+ */
+#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
+#undef MC_CMD_0x46_PRIVILEGE_CTG
+
+#define MC_CMD_0x46_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMIN 8
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX 252
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_NUM(len) (((len)-4)/4)
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP 0x1 /* enum */
+#define          MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_NS 0x2 /* enum */
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_OFST 4
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_LEN 4
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MINNUM 1
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM 62
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_DATA_MAXNUM_MCDI2 254
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP msgrequest */
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN 14
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_OFST 4
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC_LEN 6
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP_LEN 4
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS msgrequest */
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_LEN 42
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0 */
+/*            MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4 */
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_OFST 4
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_MAC_LEN 6
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_OFST 10
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_SNIPV6_LEN 16
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_OFST 26
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_NS_IPV6_LEN 16
+
+/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define    MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN 4
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_OFST 0
+#define       MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID_LEN 4
+
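+/* Illustrative sketch, not part of the generated definitions: installing an
+ * ARP lights-out offload and returning the firmware-assigned filter ID.
+ * Assumes the mcdi.h helpers plus ether_addr_copy() from linux/etherdevice.h;
+ * the wrapper name is hypothetical and the caller is assumed to supply the
+ * IP address in the byte order the firmware expects.
+ */
+#if 0	/* example only */
+static int example_add_arp_offload(struct efx_nic *efx, const u8 *mac,
+				   __be32 ip, u32 *filter_id)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_PROTOCOL,
+		       MC_CMD_LIGHTSOUT_OFFLOAD_PROTOCOL_ARP);
+	ether_addr_copy(MCDI_PTR(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_MAC), mac);
+	/* The IP field sits at offset 10, so copy it rather than using the
+	 * dword accessors, which require 4-byte alignment.
+	 */
+	memcpy(MCDI_PTR(inbuf, ADD_LIGHTSOUT_OFFLOAD_IN_ARP_IP), &ip, sizeof(ip));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_ADD_LIGHTSOUT_OFFLOAD, inbuf,
+			  MC_CMD_ADD_LIGHTSOUT_OFFLOAD_IN_ARP_LEN,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;	/* may be -ENOSYS if WoL offloads are unsupported */
+	if (outlen < MC_CMD_ADD_LIGHTSOUT_OFFLOAD_OUT_LEN)
+		return -EIO;
+	*filter_id = MCDI_DWORD(outbuf, ADD_LIGHTSOUT_OFFLOAD_OUT_FILTER_ID);
+	return 0;
+}
+#endif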
+
+/***********************************/
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
+ */
+#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
+#undef MC_CMD_0x47_PRIVILEGE_CTG
+
+#define MC_CMD_0x47_PRIVILEGE_CTG SRIOV_CTG_LINK
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN msgrequest */
+#define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_LEN 8
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_OFST 0
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_PROTOCOL_LEN 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_OFST 4
+#define       MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_IN_FILTER_ID_LEN 4
+
+/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT msgresponse */
+#define    MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_MAC_RESET_RESTORE
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
+ */
+#define MC_CMD_MAC_RESET_RESTORE 0x48
+
+/* MC_CMD_MAC_RESET_RESTORE_IN msgrequest */
+#define    MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
+
+/* MC_CMD_MAC_RESET_RESTORE_OUT msgresponse */
+#define    MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
+ */
+#define MC_CMD_TESTASSERT 0x49
+#undef MC_CMD_0x49_PRIVILEGE_CTG
+
+#define MC_CMD_0x49_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_TESTASSERT_IN msgrequest */
+#define    MC_CMD_TESTASSERT_IN_LEN 0
+
+/* MC_CMD_TESTASSERT_OUT msgresponse */
+#define    MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_TESTASSERT_V2_IN msgrequest */
+#define    MC_CMD_TESTASSERT_V2_IN_LEN 4
+/* How to provoke the assertion */
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_OFST 0
+#define       MC_CMD_TESTASSERT_V2_IN_TYPE_LEN 4
+/* enum: Assert using the FAIL_ASSERTION_WITH_USEFUL_VALUES macro. Unless
+ * you're testing firmware, this is what you want.
+ */
+#define          MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES 0x0
+/* enum: Assert using assert(0); */
+#define          MC_CMD_TESTASSERT_V2_IN_ASSERT_FALSE 0x1
+/* enum: Deliberately trigger a watchdog */
+#define          MC_CMD_TESTASSERT_V2_IN_WATCHDOG 0x2
+/* enum: Deliberately trigger a trap by loading from an invalid address */
+#define          MC_CMD_TESTASSERT_V2_IN_LOAD_TRAP 0x3
+/* enum: Deliberately trigger a trap by storing to an invalid address */
+#define          MC_CMD_TESTASSERT_V2_IN_STORE_TRAP 0x4
+/* enum: Jump to an invalid address */
+#define          MC_CMD_TESTASSERT_V2_IN_JUMP_TRAP 0x5
+
+/* MC_CMD_TESTASSERT_V2_OUT msgresponse */
+#define    MC_CMD_TESTASSERT_V2_OUT_LEN 0
+
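+/* Illustrative sketch, not part of the generated definitions: requesting a
+ * particular flavour of deliberate firmware failure via MC_CMD_TESTASSERT_V2.
+ * Assumes the mcdi.h helpers; the wrapper name is hypothetical, and a
+ * successful detonation may surface as an MCDI error or timeout rather than
+ * a clean completion.
+ */
+#if 0	/* example only */
+static int example_testassert(struct efx_nic *efx, u32 type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_TESTASSERT_V2_IN_LEN);
+
+	/* e.g. type = MC_CMD_TESTASSERT_V2_IN_FAIL_ASSERTION_WITH_USEFUL_VALUES */
+	MCDI_SET_DWORD(inbuf, TESTASSERT_V2_IN_TYPE, type);
+	return efx_mcdi_rpc(efx, MC_CMD_TESTASSERT, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+#endif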
+
+/***********************************/
+/* MC_CMD_WORKAROUND
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround; that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#undef MC_CMD_0x4a_PRIVILEGE_CTG
+
+#define MC_CMD_0x4a_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_WORKAROUND_IN msgrequest */
+#define    MC_CMD_WORKAROUND_IN_LEN 8
+/* The enums here must correspond with those in MC_CMD_GET_WORKAROUND. */
+#define       MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define       MC_CMD_WORKAROUND_IN_TYPE_LEN 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be an identity map) */
+#define          MC_CMD_WORKAROUND_BUG35017 0x3
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_WORKAROUND_BUG41750 0x4
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_WORKAROUND_BUG42008 0x5
+/* enum: Bug 26807 features present in firmware (multicast filter chaining)
+ * This feature cannot be turned on/off while there are any filters already
+ * present. The behaviour in such case depends on the acting client's privilege
+ * level. If the client has the admin privilege, then all functions that have
+ * filters installed will be FLRed and the FLR_DONE flag will be set. Otherwise
+ * the command will fail with MC_CMD_ERR_FILTERS_PRESENT.
+ */
+#define          MC_CMD_WORKAROUND_BUG26807 0x6
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define          MC_CMD_WORKAROUND_BUG61265 0x7
+/* 0 = disable the workaround indicated by TYPE; any non-zero value = enable
+ * the workaround
+ */
+#define       MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define       MC_CMD_WORKAROUND_IN_ENABLED_LEN 4
+
+/* MC_CMD_WORKAROUND_OUT msgresponse */
+#define    MC_CMD_WORKAROUND_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND_EXT_OUT msgresponse: This response format will be used
+ * when (TYPE == MC_CMD_WORKAROUND_BUG26807)
+ */
+#define    MC_CMD_WORKAROUND_EXT_OUT_LEN 4
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_OFST 0
+#define       MC_CMD_WORKAROUND_EXT_OUT_FLAGS_LEN 4
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_OFST 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN 0
+#define        MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_WIDTH 1
+
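+/* Illustrative sketch, not part of the generated definitions: enabling or
+ * disabling one workaround and, for MC_CMD_WORKAROUND_BUG26807, picking up
+ * the flags from the extended response. Assumes the mcdi.h helpers; the
+ * wrapper name is hypothetical.
+ */
+#if 0	/* example only */
+static int example_set_workaround(struct efx_nic *efx, u32 type, bool enable,
+				  unsigned int *flags_out)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_WORKAROUND_EXT_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enable ? 1 : 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;	/* -EINVAL if the firmware lacks this workaround */
+	if (flags_out) {
+		/* Only the BUG26807 variant returns the extended response */
+		if (outlen >= MC_CMD_WORKAROUND_EXT_OUT_LEN)
+			*flags_out = MCDI_DWORD(outbuf, WORKAROUND_EXT_OUT_FLAGS);
+		else
+			*flags_out = 0;
+	}
+	return 0;
+}
+#endif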
+
+/***********************************/
+/* MC_CMD_GET_PHY_MEDIA_INFO
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#undef MC_CMD_0x4b_PRIVILEGE_CTG
+
+#define MC_CMD_0x4b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_IN msgrequest */
+#define    MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_LEN 4
+
+/* MC_CMD_GET_PHY_MEDIA_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_LEN 4
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MINNUM 1
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM 248
+#define       MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_MAXNUM_MCDI2 1016
+
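+/* Illustrative sketch, not part of the generated definitions: reading one
+ * SFP+ module page (128 bytes from I2C address 0xA0, offset 0 or 0x80 for
+ * PAGE=0 or 1) and copying DATALEN bytes out. Assumes the mcdi.h helpers;
+ * the wrapper name and caller-provided buffer are hypothetical.
+ */
+#if 0	/* example only */
+static int example_read_media_page(struct efx_nic *efx, u32 page,
+				   u8 *buf, size_t buf_len)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+	size_t outlen, datalen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_MEDIA_INFO, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN)
+		return -EIO;
+	datalen = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
+	if (datalen > buf_len ||
+	    datalen > MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_NUM(outlen))
+		return -EIO;
+	memcpy(buf, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA), datalen);
+	return 0;
+}
+#endif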
+
+/***********************************/
+/* MC_CMD_NVRAM_TEST
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#undef MC_CMD_0x4c_PRIVILEGE_CTG
+
+#define MC_CMD_0x4c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_TEST_IN msgrequest */
+#define    MC_CMD_NVRAM_TEST_IN_LEN 4
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_TEST_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+
+/* MC_CMD_NVRAM_TEST_OUT msgresponse */
+#define    MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define       MC_CMD_NVRAM_TEST_OUT_RESULT_LEN 4
+/* enum: Passed. */
+#define          MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define          MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define          MC_CMD_NVRAM_TEST_NOTSUPP 0x2
+
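+/* Illustrative sketch, not part of the generated definitions: testing one
+ * NVRAM partition type and mapping the RESULT enum onto an errno-style
+ * return. Assumes the mcdi.h helpers; the wrapper name is hypothetical.
+ */
+#if 0	/* example only */
+static int example_nvram_test(struct efx_nic *efx, u32 type)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_NVRAM_TEST_OUT_LEN)
+		return -EIO;
+
+	switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+	case MC_CMD_NVRAM_TEST_PASS:
+	case MC_CMD_NVRAM_TEST_NOTSUPP:
+		return 0;
+	default:
+		return -EIO;
+	}
+}
+#endif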
+
+/***********************************/
+/* MC_CMD_MRSFP_TWEAK
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+
+/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
+#define    MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_LEN 4
+/* 0-8 low->high ref.V */
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_LEN 4
+/* 0-8 0-8 low->high boost */
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_LEN 4
+/* 0-8 low->high ref.V */
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
+#define       MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_LEN 4
+
+/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
+#define    MC_CMD_MRSFP_TWEAK_IN_READ_ONLY_LEN 0
+
+/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
+#define    MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_LEN 4
+/* output bits */
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_LEN 4
+/* direction */
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
+#define       MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_LEN 4
+/* enum: Out. */
+#define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define          MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
+
+
+/***********************************/
+/* MC_CMD_SENSOR_SET_LIMS
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
+ */
+#define MC_CMD_SENSOR_SET_LIMS 0x4e
+#undef MC_CMD_0x4e_PRIVILEGE_CTG
+
+#define MC_CMD_0x4e_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SENSOR_SET_LIMS_IN msgrequest */
+#define    MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
+#define       MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW0_LEN 4
+/* interpretation is sensor-specific. */
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI0_LEN 4
+/* interpretation is sensor-specific. */
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+#define       MC_CMD_SENSOR_SET_LIMS_IN_LOW1_LEN 4
+/* interpretation is sensor-specific. */
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
+#define       MC_CMD_SENSOR_SET_LIMS_IN_HI1_LEN 4
+
+/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
+#define    MC_CMD_SENSOR_SET_LIMS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RESOURCE_LIMITS
+ */
+#define MC_CMD_GET_RESOURCE_LIMITS 0x4f
+
+/* MC_CMD_GET_RESOURCE_LIMITS_IN msgrequest */
+#define    MC_CMD_GET_RESOURCE_LIMITS_IN_LEN 0
+
+/* MC_CMD_GET_RESOURCE_LIMITS_OUT msgresponse */
+#define    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN 16
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_OFST 0
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_BUFTBL_LEN 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_OFST 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_EVQ_LEN 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_LEN 4
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+#define       MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_LEN 4
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+#undef MC_CMD_0x51_PRIVILEGE_CTG
+
+#define MC_CMD_0x51_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define    MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+#define    MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(len) (((len)-4)/4)
+/* total number of partitions */
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_LEN 4
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
+#define       MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM_MCDI2 254
+
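+/* Illustrative sketch, not part of the generated definitions: walking the
+ * variable-length TYPE_ID array returned by MC_CMD_NVRAM_PARTITIONS. Assumes
+ * the mcdi.h helpers; the wrapper name and the per-partition callback are
+ * hypothetical.
+ */
+#if 0	/* example only */
+static int example_for_each_partition(struct efx_nic *efx,
+				      void (*fn)(struct efx_nic *efx, u32 type))
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+	const __le32 *type_ids;
+	size_t outlen;
+	u32 i, count;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+		return -EIO;
+
+	count = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+	if (count > MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_NUM(outlen))
+		return -EIO;
+	type_ids = (const __le32 *)MCDI_PTR(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID);
+	for (i = 0; i < count; i++)
+		fn(efx, le32_to_cpu(type_ids[i]));
+	return 0;
+}
+#endif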
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+#undef MC_CMD_0x52_PRIVILEGE_CTG
+
+#define MC_CMD_0x52_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define    MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_IN_TYPE_LEN 4
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define    MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define    MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define    MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+#define    MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(len) (((len)-20)/1)
+/* Partition type ID code */
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define       MC_CMD_NVRAM_METADATA_OUT_TYPE_LEN 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define       MC_CMD_NVRAM_METADATA_OUT_FLAGS_LEN 4
+#define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_OFST 4
+#define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define        MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_OFST 4
+#define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define        MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_OFST 4
+#define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define        MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+#define       MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_LEN 4
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define       MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
+#define       MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM_MCDI2 1000
+
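+/* Illustrative sketch, not part of the generated definitions: fetching the
+ * W.X.Y.Z version for one partition type, honouring the VERSION_VALID flag.
+ * Assumes the mcdi.h helpers (including the 16-bit MCDI_WORD accessor) and
+ * the BIT() macro; the wrapper name is hypothetical.
+ */
+#if 0	/* example only */
+static int example_nvram_version(struct efx_nic *efx, u32 type, u16 version[4])
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+		return -EIO;
+	if (!(MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+	      BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)))
+		return -ENOENT;
+
+	version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W);
+	version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X);
+	version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y);
+	version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z);
+	return 0;
+}
+#endif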
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+#undef MC_CMD_0x55_PRIVILEGE_CTG
+
+#define MC_CMD_0x55_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define    MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define    MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_LEN 4
+/* Spacing of allocated MAC addresses */
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
+#define       MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_LEN 4
+
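+/* Illustrative sketch, not part of the generated definitions: deriving the
+ * Nth MAC address for this function from the base/count/stride triple.
+ * Assumes the mcdi.h helpers plus ether_addr_to_u64()/u64_to_ether_addr()
+ * from linux/etherdevice.h; the wrapper name is hypothetical.
+ */
+#if 0	/* example only */
+static int example_get_nth_mac(struct efx_nic *efx, u32 n, u8 mac[ETH_ALEN])
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+	size_t outlen;
+	u64 base;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+		return -EIO;
+	if (n >= MCDI_DWORD(outbuf, GET_MAC_ADDRESSES_OUT_MAC_COUNT))
+		return -ERANGE;
+
+	base = ether_addr_to_u64(MCDI_PTR(outbuf,
+					  GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+	base += (u64)n * MCDI_DWORD(outbuf, GET_MAC_ADDRESSES_OUT_MAC_STRIDE);
+	u64_to_ether_addr(base, mac);
+	return 0;
+}
+#endif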
+
+/***********************************/
+/* MC_CMD_CLP
+ * Perform a CLP related operation, see SF-110495-PS for details of CLP
+ * processing. This command has been extended to accommodate the requirements of
+ * different manufacturers which are to be found in SF-119187-TC, SF-119186-TC,
+ * SF-120509-TC and SF-117282-PS.
+ */
+#define MC_CMD_CLP 0x56
+#undef MC_CMD_0x56_PRIVILEGE_CTG
+
+#define MC_CMD_0x56_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CLP_IN msgrequest */
+#define    MC_CMD_CLP_IN_LEN 4
+/* Sub operation */
+#define       MC_CMD_CLP_IN_OP_OFST 0
+#define       MC_CMD_CLP_IN_OP_LEN 4
+/* enum: Return to factory default settings */
+#define          MC_CMD_CLP_OP_DEFAULT 0x1
+/* enum: Set MAC address */
+#define          MC_CMD_CLP_OP_SET_MAC 0x2
+/* enum: Get MAC address */
+#define          MC_CMD_CLP_OP_GET_MAC 0x3
+/* enum: Set UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_SET_BOOT 0x4
+/* enum: Get UEFI/GPXE boot mode */
+#define          MC_CMD_CLP_OP_GET_BOOT 0x5
+
+/* MC_CMD_CLP_OUT msgresponse */
+#define    MC_CMD_CLP_OUT_LEN 0
+
+/* MC_CMD_CLP_IN_DEFAULT msgrequest */
+#define    MC_CMD_CLP_IN_DEFAULT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_DEFAULT msgresponse */
+#define    MC_CMD_CLP_OUT_DEFAULT_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_LEN 12
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_OUT_SET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_SET_MAC_LEN 0
+
+/* MC_CMD_CLP_IN_SET_MAC_V2 msgrequest */
+#define    MC_CMD_CLP_IN_SET_MAC_V2_LEN 16
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+/* The MAC address assigned to port. A zero MAC address of 00:00:00:00:00:00
+ * restores the permanent (factory-programmed) MAC address associated with the
+ * port. A non-zero MAC address persists until a PCIe reset or a power cycle.
+ */
+#define       MC_CMD_CLP_IN_SET_MAC_V2_ADDR_OFST 4
+#define       MC_CMD_CLP_IN_SET_MAC_V2_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_OFST 10
+#define       MC_CMD_CLP_IN_SET_MAC_V2_RESERVED_LEN 2
+#define       MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_OFST 12
+#define       MC_CMD_CLP_IN_SET_MAC_V2_FLAGS_LEN 4
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_OFST 12
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_LBN 0
+#define        MC_CMD_CLP_IN_SET_MAC_V2_VIRTUAL_WIDTH 1
+
+/* MC_CMD_CLP_IN_GET_MAC msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_IN_GET_MAC_V2 msgrequest */
+#define    MC_CMD_CLP_IN_GET_MAC_V2_LEN 8
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+#define       MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_OFST 4
+#define       MC_CMD_CLP_IN_GET_MAC_V2_FLAGS_LEN 4
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_OFST 4
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_LBN 0
+#define        MC_CMD_CLP_IN_GET_MAC_V2_PERMANENT_WIDTH 1
+
+/* MC_CMD_CLP_OUT_GET_MAC msgresponse */
+#define    MC_CMD_CLP_OUT_GET_MAC_LEN 8
+/* MAC address assigned to port */
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_OFST 0
+#define       MC_CMD_CLP_OUT_GET_MAC_ADDR_LEN 6
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_OFST 6
+#define       MC_CMD_CLP_OUT_GET_MAC_RESERVED_LEN 2
+
+/* MC_CMD_CLP_IN_SET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_SET_BOOT_LEN 5
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+/* Boot flag */
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_OFST 4
+#define       MC_CMD_CLP_IN_SET_BOOT_FLAG_LEN 1
+
+/* MC_CMD_CLP_OUT_SET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_SET_BOOT_LEN 0
+
+/* MC_CMD_CLP_IN_GET_BOOT msgrequest */
+#define    MC_CMD_CLP_IN_GET_BOOT_LEN 4
+/*            MC_CMD_CLP_IN_OP_OFST 0 */
+/*            MC_CMD_CLP_IN_OP_LEN 4 */
+
+/* MC_CMD_CLP_OUT_GET_BOOT msgresponse */
+#define    MC_CMD_CLP_OUT_GET_BOOT_LEN 4
+/* Boot flag */
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_OFST 0
+#define       MC_CMD_CLP_OUT_GET_BOOT_FLAG_LEN 1
+/* Padding */
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_OFST 1
+#define       MC_CMD_CLP_OUT_GET_BOOT_RESERVED_LEN 3
+
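+/* Illustrative sketch, not part of the generated definitions: the CLP
+ * sub-operation pattern, here setting the port MAC address (all zeroes
+ * restores the factory-programmed address). Assumes the mcdi.h helpers plus
+ * ether_addr_copy(); the wrapper name is hypothetical and firmware/privilege
+ * support for CLP is not guaranteed on every board.
+ */
+#if 0	/* example only */
+static int example_clp_set_mac(struct efx_nic *efx, const u8 *mac)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_CLP_IN_SET_MAC_LEN);
+
+	MCDI_SET_DWORD(inbuf, CLP_IN_OP, MC_CMD_CLP_OP_SET_MAC);
+	ether_addr_copy(MCDI_PTR(inbuf, CLP_IN_SET_MAC_ADDR), mac);
+	return efx_mcdi_rpc(efx, MC_CMD_CLP, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+#endif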
+
+/***********************************/
+/* MC_CMD_MUM
+ * Perform a MUM operation
+ */
+#define MC_CMD_MUM 0x57
+#undef MC_CMD_0x57_PRIVILEGE_CTG
+
+#define MC_CMD_0x57_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_MUM_IN msgrequest */
+#define    MC_CMD_MUM_IN_LEN 4
+#define       MC_CMD_MUM_IN_OP_HDR_OFST 0
+#define       MC_CMD_MUM_IN_OP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_OP_OFST 0
+#define        MC_CMD_MUM_IN_OP_LBN 0
+#define        MC_CMD_MUM_IN_OP_WIDTH 8
+/* enum: NULL MCDI command to MUM */
+#define          MC_CMD_MUM_OP_NULL 0x1
+/* enum: Get MUM version */
+#define          MC_CMD_MUM_OP_GET_VERSION 0x2
+/* enum: Issue raw I2C command to MUM */
+#define          MC_CMD_MUM_OP_RAW_CMD 0x3
+/* enum: Read from registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_READ 0x4
+/* enum: Write to registers on devices connected to MUM. */
+#define          MC_CMD_MUM_OP_WRITE 0x5
+/* enum: Control UART logging. */
+#define          MC_CMD_MUM_OP_LOG 0x6
+/* enum: Operations on MUM GPIO lines */
+#define          MC_CMD_MUM_OP_GPIO 0x7
+/* enum: Get sensor readings from MUM */
+#define          MC_CMD_MUM_OP_READ_SENSORS 0x8
+/* enum: Initiate clock programming on the MUM */
+#define          MC_CMD_MUM_OP_PROGRAM_CLOCKS 0x9
+/* enum: Initiate FPGA load from flash on the MUM */
+#define          MC_CMD_MUM_OP_FPGA_LOAD 0xa
+/* enum: Request sensor reading from MUM ADC resulting from earlier request via
+ * MUM ATB
+ */
+#define          MC_CMD_MUM_OP_READ_ATB_SENSOR 0xb
+/* enum: Send commands relating to the QSFP ports via the MUM for PHY
+ * operations
+ */
+#define          MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define          MC_CMD_MUM_OP_READ_DDR_INFO 0xd
+
+/* MC_CMD_MUM_IN_NULL msgrequest */
+#define    MC_CMD_MUM_IN_NULL_LEN 4
+/* MUM cmd header */
+#define       MC_CMD_MUM_IN_CMD_OFST 0
+#define       MC_CMD_MUM_IN_CMD_LEN 4
+
+/* MC_CMD_MUM_IN_GET_VERSION msgrequest */
+#define    MC_CMD_MUM_IN_GET_VERSION_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_READ_LEN 16
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device (connected to the MUM) whose registers are to be read */
+#define       MC_CMD_MUM_IN_READ_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_READ_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE 0x1
+/* enum: Hittite HMC1035 clock generator for NIC-side on Sorrento board */
+#define          MC_CMD_MUM_DEV_HITTITE_NIC 0x2
+/* 32-bit address to read from */
+#define       MC_CMD_MUM_IN_READ_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_READ_ADDR_LEN 4
+/* Number of words to read. */
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_OFST 12
+#define       MC_CMD_MUM_IN_READ_NUMWORDS_LEN 4
+
+/* MC_CMD_MUM_IN_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_WRITE_LENMIN 16
+#define    MC_CMD_MUM_IN_WRITE_LENMAX 252
+#define    MC_CMD_MUM_IN_WRITE_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_IN_WRITE_LEN(num) (12+4*(num))
+#define    MC_CMD_MUM_IN_WRITE_BUFFER_NUM(len) (((len)-12)/4)
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/* ID of the device (connected to the MUM) whose registers are to be written */
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_OFST 4
+#define       MC_CMD_MUM_IN_WRITE_DEVICE_LEN 4
+/* enum: Hittite HMC1035 clock generator on Sorrento board */
+/*               MC_CMD_MUM_DEV_HITTITE 0x1 */
+/* 32-bit address to write to */
+#define       MC_CMD_MUM_IN_WRITE_ADDR_OFST 8
+#define       MC_CMD_MUM_IN_WRITE_ADDR_LEN 4
+/* Words to write */
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_OFST 12
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_LEN 4
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM 60
+#define       MC_CMD_MUM_IN_WRITE_BUFFER_MAXNUM_MCDI2 252
+
+/* MC_CMD_MUM_IN_RAW_CMD msgrequest */
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMIN 17
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_IN_RAW_CMD_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_IN_RAW_CMD_LEN(num) (16+1*(num))
+#define    MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_NUM(len) (((len)-16)/1)
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/* MUM I2C cmd code */
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_OFST 4
+#define       MC_CMD_MUM_IN_RAW_CMD_CMD_CODE_LEN 4
+/* Number of bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_OFST 8
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_WRITE_LEN 4
+/* Number of bytes to read */
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_OFST 12
+#define       MC_CMD_MUM_IN_RAW_CMD_NUM_READ_LEN 4
+/* Bytes to write */
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_OFST 16
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_LEN 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MINNUM 1
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM 236
+#define       MC_CMD_MUM_IN_RAW_CMD_WRITE_DATA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_MUM_IN_LOG msgrequest */
+#define    MC_CMD_MUM_IN_LOG_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_LOG_OP_OFST 4
+#define       MC_CMD_MUM_IN_LOG_OP_LEN 4
+#define          MC_CMD_MUM_IN_LOG_OP_UART 0x1 /* enum */
+
+/* MC_CMD_MUM_IN_LOG_OP_UART msgrequest */
+#define    MC_CMD_MUM_IN_LOG_OP_UART_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_OFST 4 */
+/*            MC_CMD_MUM_IN_LOG_OP_LEN 4 */
+/* Enable/disable debug output to UART */
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_OFST 8
+#define       MC_CMD_MUM_IN_LOG_OP_UART_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_GPIO_OPCODE_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_IN_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_READ 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE 0x3 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ 0x4 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP 0x5 /* enum */
+
+/* MC_CMD_MUM_IN_GPIO_IN_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_IN_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_IN_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_HDR_LEN 4
+/* The first 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_OFST 8
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK1_LEN 4
+/* The second 32-bit word to be written to the GPIO OUT ENABLE register. */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_OFST 12
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_WRITE_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OUT_ENABLE_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_LBN 8
+#define        MC_CMD_MUM_IN_GPIO_OP_BITWISE_OP_WIDTH 8
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_READ 0x0 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG 0x2 /* enum */
+#define          MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE 0x3 /* enum */
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_LBN 16
+#define        MC_CMD_MUM_IN_GPIO_OP_GPIO_NUMBER_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_READ msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_READ_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_READ_HDR_LEN 4
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_WRITE_WRITEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_CONFIG_CFG_WIDTH 8
+
+/* MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE msgrequest */
+#define    MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_LEN 8
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_HDR_LEN 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_OFST 4
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_LBN 24
+#define        MC_CMD_MUM_IN_GPIO_OP_OUT_ENABLE_ENABLEBIT_WIDTH 8
+
+/* MC_CMD_MUM_IN_READ_SENSORS msgrequest */
+#define    MC_CMD_MUM_IN_READ_SENSORS_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_OFST 4
+#define       MC_CMD_MUM_IN_READ_SENSORS_PARAMS_LEN 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_LBN 0
+#define        MC_CMD_MUM_IN_READ_SENSORS_SENSOR_ID_WIDTH 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_OFST 4
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN 8
+#define        MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_WIDTH 8
+
+/* MC_CMD_MUM_IN_PROGRAM_CLOCKS msgrequest */
+#define    MC_CMD_MUM_IN_PROGRAM_CLOCKS_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Bit-mask of clocks to be programmed */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_OFST 4
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_MASK_LEN 4
+#define          MC_CMD_MUM_CLOCK_ID_FPGA 0x0 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_DDR 0x1 /* enum */
+#define          MC_CMD_MUM_CLOCK_ID_NIC 0x2 /* enum */
+/* Control flags for clock programming */
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
+#define       MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_LEN 4
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_OFST 8
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define        MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
+
+/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
+#define    MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+/* Enable/Disable FPGA config from flash */
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_OFST 4
+#define       MC_CMD_MUM_IN_FPGA_LOAD_ENABLE_LEN 4
+
+/* MC_CMD_MUM_IN_READ_ATB_SENSOR msgrequest */
+#define    MC_CMD_MUM_IN_READ_ATB_SENSOR_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_IN_QSFP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_LEN 12
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_HDR_LEN 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_OFST 4
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_LBN 0
+#define        MC_CMD_MUM_IN_QSFP_OPCODE_WIDTH 4
+#define          MC_CMD_MUM_IN_QSFP_INIT 0x0 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_RECONFIGURE 0x1 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP 0x2 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO 0x3 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_FILL_STATS 0x4 /* enum */
+#define          MC_CMD_MUM_IN_QSFP_POLL_BIST 0x5 /* enum */
+#define       MC_CMD_MUM_IN_QSFP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_INIT msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_INIT_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_INIT_IDX_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_INIT_CAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_RECONFIGURE msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_RECONFIGURE_LEN 24
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_IDX_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_TX_DISABLE_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_OFST 16
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LANES_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_OFST 20
+#define       MC_CMD_MUM_IN_QSFP_RECONFIGURE_PORT_LINK_SPEED_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_SUPPORTED_CAP_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_LEN 16
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_IDX_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_OFST 12
+#define       MC_CMD_MUM_IN_QSFP_GET_MEDIA_INFO_PAGE_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_FILL_STATS msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_FILL_STATS_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_FILL_STATS_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_QSFP_POLL_BIST msgrequest */
+#define    MC_CMD_MUM_IN_QSFP_POLL_BIST_LEN 12
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_LEN 4
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+#define       MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_LEN 4
+
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define    MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/*            MC_CMD_MUM_IN_CMD_OFST 0 */
+/*            MC_CMD_MUM_IN_CMD_LEN 4 */
+
+/* MC_CMD_MUM_OUT msgresponse */
+#define    MC_CMD_MUM_OUT_LEN 0
+
+/* MC_CMD_MUM_OUT_NULL msgresponse */
+#define    MC_CMD_MUM_OUT_NULL_LEN 0
+
+/* MC_CMD_MUM_OUT_GET_VERSION msgresponse */
+#define    MC_CMD_MUM_OUT_GET_VERSION_LEN 12
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_OFST 0
+#define       MC_CMD_MUM_OUT_GET_VERSION_FIRMWARE_LEN 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LEN 8
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_LO_OFST 4
+#define       MC_CMD_MUM_OUT_GET_VERSION_VERSION_HI_OFST 8
+
+/* MC_CMD_MUM_OUT_RAW_CMD msgresponse */
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMIN 1
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX 252
+#define    MC_CMD_MUM_OUT_RAW_CMD_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_OUT_RAW_CMD_LEN(num) (0+1*(num))
+#define    MC_CMD_MUM_OUT_RAW_CMD_DATA_NUM(len) (((len)-0)/1)
+/* returned data */
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM 252
+#define       MC_CMD_MUM_OUT_RAW_CMD_DATA_MAXNUM_MCDI2 1020
+
+/* MC_CMD_MUM_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_READ_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_OUT_READ_LEN(num) (0+4*(num))
+#define    MC_CMD_MUM_OUT_READ_BUFFER_NUM(len) (((len)-0)/4)
+#define       MC_CMD_MUM_OUT_READ_BUFFER_OFST 0
+#define       MC_CMD_MUM_OUT_READ_BUFFER_LEN 4
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM 63
+#define       MC_CMD_MUM_OUT_READ_BUFFER_MAXNUM_MCDI2 255
+
+/* MC_CMD_MUM_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_LEN 0
+
+/* MC_CMD_MUM_OUT_LOG_OP_UART msgresponse */
+#define    MC_CMD_MUM_OUT_LOG_OP_UART_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_IN_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_IN_READ_LEN 8
+/* The first 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO IN register. */
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_IN_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_READ_LEN 8
+/* The first 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK1_LEN 4
+/* The second 32-bit word read from the GPIO OUT register. */
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_LEN 8
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK1_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_OFST 4
+#define       MC_CMD_MUM_OUT_GPIO_OUT_ENABLE_READ_GPIOMASK2_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_READ msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_LEN 4
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_OFST 0
+#define       MC_CMD_MUM_OUT_GPIO_OP_OUT_READ_BIT_READ_LEN 4
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_WRITE_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_CONFIG_LEN 0
+
+/* MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE msgresponse */
+#define    MC_CMD_MUM_OUT_GPIO_OP_OUT_ENABLE_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_SENSORS msgresponse */
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMIN 4
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX 252
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_OUT_READ_SENSORS_LEN(num) (0+4*(num))
+#define    MC_CMD_MUM_OUT_READ_SENSORS_DATA_NUM(len) (((len)-0)/4)
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_OFST 0
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_LEN 4
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM 63
+#define       MC_CMD_MUM_OUT_READ_SENSORS_DATA_MAXNUM_MCDI2 255
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_OFST 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_LBN 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_READING_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_OFST 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN 16
+#define        MC_CMD_MUM_OUT_READ_SENSORS_STATE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_OFST 0
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_SENSORS_TYPE_WIDTH 8
+
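+/* Illustrative sketch, not part of the generated definitions: a MUM
+ * sub-command round trip, requesting one sensor via MC_CMD_MUM_OP_READ_SENSORS
+ * and unpacking READING/STATE from the packed response word using the
+ * _LBN/_WIDTH values above. Assumes the mcdi.h helpers; the wrapper name is
+ * hypothetical and MUM support depends on the board.
+ */
+#if 0	/* example only */
+static int example_mum_read_sensor(struct efx_nic *efx, u8 sensor_id,
+				   u16 *reading, u8 *state)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MUM_IN_READ_SENSORS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MUM_OUT_READ_SENSORS_LENMAX);
+	size_t outlen;
+	u32 data;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MUM_IN_CMD, MC_CMD_MUM_OP_READ_SENSORS);
+	/* PARAMS packs SENSOR_ID (LBN 0) and NUM_SENSORS (LBN 8) */
+	MCDI_SET_DWORD(inbuf, MUM_IN_READ_SENSORS_PARAMS,
+		       sensor_id |
+		       (1 << MC_CMD_MUM_IN_READ_SENSORS_NUM_SENSORS_LBN));
+	rc = efx_mcdi_rpc(efx, MC_CMD_MUM, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_MUM_OUT_READ_SENSORS_LENMIN)
+		return -EIO;
+
+	data = MCDI_DWORD(outbuf, MUM_OUT_READ_SENSORS_DATA);
+	*reading = data & 0xffff;	/* READING: LBN 0, width 16 */
+	*state = (data >> MC_CMD_MUM_OUT_READ_SENSORS_STATE_LBN) & 0xff;
+	return 0;
+}
+#endif
+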
+/* MC_CMD_MUM_OUT_PROGRAM_CLOCKS msgresponse */
+#define    MC_CMD_MUM_OUT_PROGRAM_CLOCKS_LEN 4
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_OFST 0
+#define       MC_CMD_MUM_OUT_PROGRAM_CLOCKS_OK_MASK_LEN 4
+
+/* MC_CMD_MUM_OUT_FPGA_LOAD msgresponse */
+#define    MC_CMD_MUM_OUT_FPGA_LOAD_LEN 0
+
+/* MC_CMD_MUM_OUT_READ_ATB_SENSOR msgresponse */
+#define    MC_CMD_MUM_OUT_READ_ATB_SENSOR_LEN 4
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_OFST 0
+#define       MC_CMD_MUM_OUT_READ_ATB_SENSOR_RESULT_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_INIT msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_INIT_LEN 0
+
+/* MC_CMD_MUM_OUT_QSFP_RECONFIGURE msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_RECONFIGURE_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LP_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_FLAGS_LEN 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_LBN 0
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_READY_WIDTH 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_OFST 4
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_LBN 1
+#define        MC_CMD_MUM_OUT_QSFP_RECONFIGURE_PORT_PHY_LINK_UP_WIDTH 1
+
+/* MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_SUPPORTED_CAP_PORT_PHY_LP_CAP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMIN 5
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX 252
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LENMAX_MCDI2 1020
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_LEN(num) (4+1*(num))
+#define    MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_NUM(len) (((len)-4)/1)
+/* in bytes */
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATALEN_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_LEN 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MINNUM 1
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM 248
+#define       MC_CMD_MUM_OUT_QSFP_GET_MEDIA_INFO_DATA_MAXNUM_MCDI2 1016
+
+/* MC_CMD_MUM_OUT_QSFP_FILL_STATS msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_FILL_STATS_LEN 8
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PMA_PMD_LINK_UP_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_OFST 4
+#define       MC_CMD_MUM_OUT_QSFP_FILL_STATS_PORT_PHY_STATS_PCS_LINK_UP_LEN 4
+
+/* MC_CMD_MUM_OUT_QSFP_POLL_BIST msgresponse */
+#define    MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+#define       MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_LEN 4
+
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX_MCDI2 1016
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+#define    MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_NUM(len) (((len)-8)/8)
+/* Discrete (soldered) DDR resistor strap info */
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_LEN 4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_OFST 0
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_OFST 0
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_LEN 4
+/* Array of SODIMM info records */
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define       MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM_MCDI2 126
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V. Values 5-15 are reserved for future usage. */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present, supported and powered on */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Module may or may not be present, but contact cannot be established
+ * over I2C
+ */
+#define          MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_OFST 8
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define        MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
+/* MC_CMD_DYNAMIC_SENSORS_LIMITS structuredef: Set of sensor limits. This
+ * should match the equivalent structure in the sensor_query SPHINX service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_LIMITS_LEN 24
+/* A value below this will trigger a warning event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_WARNING_WIDTH 32
+/* A value below this will trigger a critical event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_CRITICAL_WIDTH 32
+/* A value below this will shut down the card. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_LBN 64
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_LO_FATAL_WIDTH 32
+/* A value above this will trigger a warning event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_OFST 12
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_LBN 96
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_WARNING_WIDTH 32
+/* A value above this will trigger a critical event. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_OFST 16
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_LBN 128
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_CRITICAL_WIDTH 32
+/* A value above this will shut down the card. */
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_OFST 20
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_LBN 160
+#define       MC_CMD_DYNAMIC_SENSORS_LIMITS_HI_FATAL_WIDTH 32
+
+/* MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structuredef: Description of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LEN 64
+/* The handle used to identify the sensor in calls to
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_HANDLE_WIDTH 32
+/* A human-readable name for the sensor (zero terminated string, max 32 bytes)
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN 32
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_WIDTH 256
+/* The type of the sensor device, and by implication the unit in which its
+ * values will be reported
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_OFST 36
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LEN 4
+/* enum: A voltage sensor. Unit is mV */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_VOLTAGE 0x0
+/* enum: A current sensor. Unit is mA */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_CURRENT 0x1
+/* enum: A power sensor. Unit is mW */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_POWER 0x2
+/* enum: A temperature sensor. Unit is Celsius */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TEMPERATURE 0x3
+/* enum: A cooling fan sensor. Unit is RPM */
+#define          MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_FAN 0x4
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_LBN 288
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_TYPE_WIDTH 32
+/* A single MC_CMD_DYNAMIC_SENSORS_LIMITS structure */
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_OFST 40
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LEN 24
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_LBN 320
+#define       MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_LIMITS_WIDTH 192
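
To make the record layout above concrete: each SENSORS entry in a GET_DESCRIPTIONS response is one 64-byte record laid out by these offsets. The small C sketch below shows one way the fixed-size NAME field could be extracted; the function name is invented for illustration and 'desc' is assumed to point at one such record.

#include <string.h>

/* Copy the sensor name out of one MC_CMD_DYNAMIC_SENSORS_DESCRIPTION record.
 * 'out' must have room for 33 bytes; termination is forced in case the
 * firmware used all 32 bytes of the field.
 */
static void ex_sensor_name(const unsigned char *desc, char *out)
{
        memcpy(out, desc + MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_OFST,
               MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN);
        out[MC_CMD_DYNAMIC_SENSORS_DESCRIPTION_NAME_LEN] = '\0';
}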
+
+/* MC_CMD_DYNAMIC_SENSORS_READING structuredef: State and value of a sensor.
+ * This should match the equivalent structure in the sensor_query SPHINX
+ * service.
+ */
+#define    MC_CMD_DYNAMIC_SENSORS_READING_LEN 12
+/* The handle used to identify the sensor */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_LBN 0
+#define       MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_WIDTH 32
+/* The current value of the sensor */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_LBN 32
+#define       MC_CMD_DYNAMIC_SENSORS_READING_VALUE_WIDTH 32
+/* The sensor's condition, e.g. good, broken or removed */
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_LEN 4
+/* enum: Sensor working normally within limits */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_OK 0x0
+/* enum: Warning threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_WARNING 0x1
+/* enum: Critical threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_CRITICAL 0x2
+/* enum: Fatal threshold breached */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_FATAL 0x3
+/* enum: Sensor not working */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_BROKEN 0x4
+/* enum: Sensor working but no reading available */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_NO_READING 0x5
+/* enum: Sensor initialization failed */
+#define          MC_CMD_DYNAMIC_SENSORS_READING_INIT_FAILED 0x6
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_LBN 64
+#define       MC_CMD_DYNAMIC_SENSORS_READING_STATE_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_LIST
+ * Return a complete list of handles for sensors currently managed by the MC,
+ * and a generation count for this version of the sensor table. On systems
+ * advertising the DYNAMIC_SENSORS capability bit, this replaces the
+ * MC_CMD_READ_SENSORS command. On multi-MC systems this may include sensors
+ * added by the NMC.
+ *
+ * Sensor handles are persistent for the lifetime of the sensor and are used to
+ * identify sensors in MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS and
+ * MC_CMD_DYNAMIC_SENSORS_GET_VALUES.
+ *
+ * The generation count is maintained by the MC, is persistent across reboots
+ * and will be incremented each time the sensor table is modified. When the
+ * table is modified, a CODE_DYNAMIC_SENSORS_CHANGE event will be generated
+ * containing the new generation count. The driver should compare this against
+ * the current generation count, and if it is different, call
+ * MC_CMD_DYNAMIC_SENSORS_LIST again to update its copy of the sensor table.
+ *
+ * The sensor count is provided to allow a future path to supporting more than
+ * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 sensors, i.e.
+ * the maximum number that will fit in a single response. As this is a fairly
+ * large number (253), it is not anticipated that this will be needed in the
+ * near future, so the count can currently be ignored.
+ *
+ * On Riverhead this command is implemented as a wrapper for `list` in the
+ * sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_LIST 0x66
+#undef MC_CMD_0x66_PRIVILEGE_CTG
+
+#define MC_CMD_0x66_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_IN_LEN 0
+
+/* MC_CMD_DYNAMIC_SENSORS_LIST_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMIN 8
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len) (((len)-8)/4)
+/* Generation count, which will be updated each time a sensor is added to or
+ * removed from the MC sensor table.
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_LEN 4
+/* Number of sensors managed by the MC. Note that in principle, this can be
+ * larger than the size of the HANDLES array.
+ */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_OFST 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_COUNT_LEN 4
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST 8
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM 61
+#define       MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_MAXNUM_MCDI2 253
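
To make the layout above concrete, here is a minimal sketch of how a consumer might walk a MC_CMD_DYNAMIC_SENSORS_LIST response. The ex_*() helpers are invented for illustration (the driver has its own MCDI accessor macros); 'resp' is assumed to hold the raw little-endian response payload and 'len' its length in bytes.

#include <stdint.h>
#include <stddef.h>

/* Illustrative little-endian 32-bit load. */
static uint32_t ex_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

/* Pull out the generation count and every sensor handle. */
static void ex_parse_sensor_list(const uint8_t *resp, size_t len)
{
        uint32_t generation =
                ex_le32(resp + MC_CMD_DYNAMIC_SENSORS_LIST_OUT_GENERATION_OFST);
        size_t i, num = MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_NUM(len);

        for (i = 0; i < num; i++) {
                uint32_t handle = ex_le32(resp +
                        MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_OFST +
                        i * MC_CMD_DYNAMIC_SENSORS_LIST_OUT_HANDLES_LEN);

                /* Record (generation, handle) for later GET_* commands. */
                (void)generation;
                (void)handle;
        }
}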
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS
+ * Get descriptions for a set of sensors, specified as an array of sensor
+ * handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for
+ * `get_descriptions` in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS 0x67
+#undef MC_CMD_0x67_PRIVILEGE_CTG
+
+#define MC_CMD_0x67_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM 63
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX 192
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LENMAX_MCDI2 960
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_LEN(num) (0+64*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_NUM(len) (((len)-0)/64)
+/* Array of MC_CMD_DYNAMIC_SENSORS_DESCRIPTION structures */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_LEN 64
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM 3
+#define       MC_CMD_DYNAMIC_SENSORS_GET_DESCRIPTIONS_OUT_SENSORS_MAXNUM_MCDI2 15
+
+
+/***********************************/
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS
+ * Read the state and value for a set of sensors, specified as an array of
+ * sensor handles as returned by MC_CMD_DYNAMIC_SENSORS_LIST.
+ *
+ * In the case of a broken sensor, the state of the response's
+ * MC_CMD_DYNAMIC_SENSORS_VALUE entry will be set to BROKEN, and any value
+ * provided should be treated as erroneous.
+ *
+ * Any handles which do not correspond to a sensor currently managed by the MC
+ * will be dropped from the response. This may happen when a sensor table
+ * update is in progress, and effectively means the set of usable sensors is
+ * the intersection between the sets of sensors known to the driver and the MC.
+ *
+ * On Riverhead this command is implemented as a wrapper for `get_readings`
+ * in the sensor_query SPHINX service.
+ */
+#define MC_CMD_DYNAMIC_SENSORS_GET_READINGS 0x68
+#undef MC_CMD_0x68_PRIVILEGE_CTG
+
+#define MC_CMD_0x68_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN msgrequest */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_NUM(len) (((len)-0)/4)
+/* Array of sensor handles */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_LEN 4
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM 63
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_IN_HANDLES_MAXNUM_MCDI2 255
+
+/* MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT msgresponse */
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMIN 0
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX 252
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len) (((len)-0)/12)
+/* Array of MC_CMD_DYNAMIC_SENSORS_READING structures */
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN 12
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MINNUM 0
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM 21
+#define       MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_MAXNUM_MCDI2 85
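
Continuing the earlier sketch (same illustrative ex_le32() helper), a reader of a GET_READINGS response could iterate the VALUES array and filter on the per-entry state:

static void ex_parse_readings(const uint8_t *resp, size_t len)
{
        size_t i, num = MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_NUM(len);

        for (i = 0; i < num; i++) {
                const uint8_t *r = resp +
                        MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_OFST +
                        i * MC_CMD_DYNAMIC_SENSORS_GET_READINGS_OUT_VALUES_LEN;
                uint32_t handle = ex_le32(r + MC_CMD_DYNAMIC_SENSORS_READING_HANDLE_OFST);
                uint32_t value = ex_le32(r + MC_CMD_DYNAMIC_SENSORS_READING_VALUE_OFST);
                uint32_t state = ex_le32(r + MC_CMD_DYNAMIC_SENSORS_READING_STATE_OFST);

                if (state == MC_CMD_DYNAMIC_SENSORS_READING_BROKEN)
                        continue;       /* value is not meaningful */

                /* Report (handle, value, state); handles missing from the
                 * response were simply unknown to the MC at this point.
                 */
                (void)handle;
                (void)value;
        }
}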
+
+
+/***********************************/
+/* MC_CMD_EVENT_CTRL
+ * Configure which categories of unsolicited events the driver expects to
+ * receive (Riverhead).
+ */
+#define MC_CMD_EVENT_CTRL 0x69
+#undef MC_CMD_0x69_PRIVILEGE_CTG
+
+#define MC_CMD_0x69_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVENT_CTRL_IN msgrequest */
+#define    MC_CMD_EVENT_CTRL_IN_LENMIN 0
+#define    MC_CMD_EVENT_CTRL_IN_LENMAX 252
+#define    MC_CMD_EVENT_CTRL_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_EVENT_CTRL_IN_LEN(num) (0+4*(num))
+#define    MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_NUM(len) (((len)-0)/4)
+/* Array of event categories for which the driver wishes to receive events. */
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST 0
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN 4
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MINNUM 0
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM 63
+#define       MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_MAXNUM_MCDI2 255
+/* enum: Driver wishes to receive LINKCHANGE events. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE 0x0
+/* enum: Driver wishes to receive SENSOR_CHANGE and SENSOR_STATE_CHANGE events.
+ */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_SENSOREVT 0x1
+/* enum: Driver wishes to receive RX (receive) errors. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_RX_ERR 0x2
+/* enum: Driver wishes to receive transmit errors. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_TX_ERR 0x3
+/* enum: Driver wishes to receive firmware alerts. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_FWALERT 0x4
+/* enum: Driver wishes to receive reboot events. */
+#define          MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT 0x5
+
+/* MC_CMD_EVENT_CTRL_OUT msgresponse */
+#define    MC_CMD_EVENT_CTRL_OUT_LEN 0
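
Since the request is just an array of 32-bit category codes, building it is mechanical. A hedged sketch, with an invented helper name and a caller-provided buffer assumed large enough:

#include <stdint.h>
#include <stddef.h>

static size_t ex_build_event_ctrl(uint8_t *req)
{
        static const uint32_t wanted[] = {
                MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_LINKCHANGE,
                MC_CMD_EVENT_CTRL_IN_MCDI_EVENT_CODE_MC_REBOOT,
        };
        size_t i;

        for (i = 0; i < 2; i++) {
                uint32_t v = wanted[i];
                uint8_t *p = req + MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_OFST +
                             i * MC_CMD_EVENT_CTRL_IN_EVENT_TYPE_LEN;

                p[0] = v & 0xff;
                p[1] = (v >> 8) & 0xff;
                p[2] = (v >> 16) & 0xff;
                p[3] = (v >> 24) & 0xff;
        }
        return MC_CMD_EVENT_CTRL_IN_LEN(2);     /* 8 bytes for two categories */
}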
+
+/* EVB_PORT_ID structuredef */
+#define    EVB_PORT_ID_LEN 4
+#define       EVB_PORT_ID_PORT_ID_OFST 0
+#define       EVB_PORT_ID_PORT_ID_LEN 4
+/* enum: An invalid port handle. */
+#define          EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define          EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define          EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define          EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define          EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define          EVB_PORT_ID_MAC3 0x2000003
+#define       EVB_PORT_ID_PORT_ID_LBN 0
+#define       EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define    EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define       EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define       EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define       EVB_VLAN_TAG_MODE_LBN 12
+#define       EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define          EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define          EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define    BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define       BUFTBL_ENTRY_OID_OFST 0
+#define       BUFTBL_ENTRY_OID_LEN 2
+#define       BUFTBL_ENTRY_OID_LBN 0
+#define       BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define       BUFTBL_ENTRY_PGSZ_OFST 2
+#define       BUFTBL_ENTRY_PGSZ_LEN 2
+#define       BUFTBL_ENTRY_PGSZ_LBN 16
+#define       BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define       BUFTBL_ENTRY_RAWADDR_OFST 4
+#define       BUFTBL_ENTRY_RAWADDR_LEN 8
+#define       BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define       BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define       BUFTBL_ENTRY_RAWADDR_LBN 32
+#define       BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define    NVRAM_PARTITION_TYPE_LEN 2
+#define       NVRAM_PARTITION_TYPE_ID_OFST 0
+#define       NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define          NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define          NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define          NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define          NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define          NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define          NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define          NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define          NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define          NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define          NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define          NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define          NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
+/* enum: Device state dump output partition */
+#define          NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define          NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define          NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define          NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Primary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA 0xb00
+/* enum: Secondary FPGA partition */
+#define          NVRAM_PARTITION_TYPE_FPGA_BACKUP 0xb01
+/* enum: FC firmware partition */
+#define          NVRAM_PARTITION_TYPE_FC_FIRMWARE 0xb02
+/* enum: FC License partition */
+#define          NVRAM_PARTITION_TYPE_FC_LICENSE 0xb03
+/* enum: Non-volatile log output partition for FC */
+#define          NVRAM_PARTITION_TYPE_FC_LOG 0xb04
+/* enum: MUM firmware partition */
+#define          NVRAM_PARTITION_TYPE_MUM_FIRMWARE 0xc00
+/* enum: SUC firmware partition (this is intentionally an alias of
+ * MUM_FIRMWARE)
+ */
+#define          NVRAM_PARTITION_TYPE_SUC_FIRMWARE 0xc00
+/* enum: MUM Non-volatile log output partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_LOG 0xc01
+/* enum: MUM Application table partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_APPTABLE 0xc02
+/* enum: MUM boot rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_BOOT_ROM 0xc03
+/* enum: MUM production signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_PROD_ROM 0xc04
+/* enum: MUM user signatures & calibration rom partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
+/* enum: MUM fuses and lockbits partition. */
+#define          NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define          NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Used by the expansion ROM for logging */
+#define          NVRAM_PARTITION_TYPE_PXE_LOG 0x1000
+/* enum: Used for XIP code of shmbooted images */
+#define          NVRAM_PARTITION_TYPE_XIP_SCRATCH 0x1100
+/* enum: Spare partition 2 */
+#define          NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Manufacturing partition. Used during manufacture to pass information
+ * between XJTAG and Manftest.
+ */
+#define          NVRAM_PARTITION_TYPE_MANUFACTURING 0x1300
+/* enum: Spare partition 4 */
+#define          NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define          NVRAM_PARTITION_TYPE_SPARE_5 0x1500
+/* enum: Partition for reporting MC status. See mc_flash_layout.h
+ * medford_mc_status_hdr_t for layout on Medford.
+ */
+#define          NVRAM_PARTITION_TYPE_STATUS 0x1600
+/* enum: Spare partition 13 */
+#define          NVRAM_PARTITION_TYPE_SPARE_13 0x1700
+/* enum: Spare partition 14 */
+#define          NVRAM_PARTITION_TYPE_SPARE_14 0x1800
+/* enum: Spare partition 15 */
+#define          NVRAM_PARTITION_TYPE_SPARE_15 0x1900
+/* enum: Spare partition 16 */
+#define          NVRAM_PARTITION_TYPE_SPARE_16 0x1a00
+/* enum: Factory defaults for dynamic configuration */
+#define          NVRAM_PARTITION_TYPE_DYNCONFIG_DEFAULTS 0x1b00
+/* enum: Factory defaults for expansion ROM configuration */
+#define          NVRAM_PARTITION_TYPE_ROMCONFIG_DEFAULTS 0x1c00
+/* enum: Field Replaceable Unit inventory information for use on IPMI
+ * platforms. See SF-119124-PS. The STATIC_CONFIG partition may contain a
+ * subset of the information stored in this partition.
+ */
+#define          NVRAM_PARTITION_TYPE_FRU_INFORMATION 0x1d00
+/* enum: Bundle image partition */
+#define          NVRAM_PARTITION_TYPE_BUNDLE 0x1e00
+/* enum: Bundle metadata partition that holds additional information related to
+ * a bundle update in TLV format
+ */
+#define          NVRAM_PARTITION_TYPE_BUNDLE_METADATA 0x1e01
+/* enum: Bundle update non-volatile log output partition */
+#define          NVRAM_PARTITION_TYPE_BUNDLE_LOG 0x1e02
+/* enum: Partition for Solarflare gPXE bootrom installed via Bundle update. */
+#define          NVRAM_PARTITION_TYPE_EXPANSION_ROM_INTERNAL 0x1e03
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define          NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define          NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define          NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define       NVRAM_PARTITION_TYPE_ID_LBN 0
+#define       NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+/* LICENSED_APP_ID structuredef */
+#define    LICENSED_APP_ID_LEN 4
+#define       LICENSED_APP_ID_ID_OFST 0
+#define       LICENSED_APP_ID_ID_LEN 4
+/* enum: OpenOnload */
+#define          LICENSED_APP_ID_ONLOAD 0x1
+/* enum: PTP timestamping */
+#define          LICENSED_APP_ID_PTP 0x2
+/* enum: SolarCapture Pro */
+#define          LICENSED_APP_ID_SOLARCAPTURE_PRO 0x4
+/* enum: SolarSecure filter engine */
+#define          LICENSED_APP_ID_SOLARSECURE 0x8
+/* enum: Performance monitor */
+#define          LICENSED_APP_ID_PERF_MONITOR 0x10
+/* enum: SolarCapture Live */
+#define          LICENSED_APP_ID_SOLARCAPTURE_LIVE 0x20
+/* enum: Capture SolarSystem */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM 0x40
+/* enum: Network Access Control */
+#define          LICENSED_APP_ID_NETWORK_ACCESS_CONTROL 0x80
+/* enum: TCP Direct */
+#define          LICENSED_APP_ID_TCP_DIRECT 0x100
+/* enum: Low Latency */
+#define          LICENSED_APP_ID_LOW_LATENCY 0x200
+/* enum: SolarCapture Tap */
+#define          LICENSED_APP_ID_SOLARCAPTURE_TAP 0x400
+/* enum: Capture SolarSystem 40G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_40G 0x800
+/* enum: Capture SolarSystem 1G */
+#define          LICENSED_APP_ID_CAPTURE_SOLARSYSTEM_1G 0x1000
+/* enum: ScaleOut Onload */
+#define          LICENSED_APP_ID_SCALEOUT_ONLOAD 0x2000
+/* enum: SCS Network Analytics Dashboard */
+#define          LICENSED_APP_ID_DSHBRD 0x4000
+/* enum: SolarCapture Trading Analytics */
+#define          LICENSED_APP_ID_SCATRD 0x8000
+#define       LICENSED_APP_ID_ID_LBN 0
+#define       LICENSED_APP_ID_ID_WIDTH 32
+
+/* LICENSED_FEATURES structuredef */
+#define    LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define       LICENSED_FEATURES_MASK_OFST 0
+#define       LICENSED_FEATURES_MASK_LEN 8
+#define       LICENSED_FEATURES_MASK_LO_OFST 0
+#define       LICENSED_FEATURES_MASK_HI_OFST 4
+#define        LICENSED_FEATURES_RX_CUT_THROUGH_OFST 0
+#define        LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define        LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define        LICENSED_FEATURES_PIO_OFST 0
+#define        LICENSED_FEATURES_PIO_LBN 1
+#define        LICENSED_FEATURES_PIO_WIDTH 1
+#define        LICENSED_FEATURES_EVQ_TIMER_OFST 0
+#define        LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define        LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define        LICENSED_FEATURES_CLOCK_OFST 0
+#define        LICENSED_FEATURES_CLOCK_LBN 3
+#define        LICENSED_FEATURES_CLOCK_WIDTH 1
+#define        LICENSED_FEATURES_RX_TIMESTAMPS_OFST 0
+#define        LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define        LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_FEATURES_TX_TIMESTAMPS_OFST 0
+#define        LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define        LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_FEATURES_RX_SNIFF_OFST 0
+#define        LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define        LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define        LICENSED_FEATURES_TX_SNIFF_OFST 0
+#define        LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define        LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define        LICENSED_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define        LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define        LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define        LICENSED_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define        LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define        LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define       LICENSED_FEATURES_MASK_LBN 0
+#define       LICENSED_FEATURES_MASK_WIDTH 64
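
The feature mask is a 64-bit field carried as LO/HI dwords; individual flags are tested against their LBN. A small sketch (function name invented, dwords assumed already read out of the MCDI response):

#include <stdint.h>

static int ex_has_pio_license(uint32_t mask_lo, uint32_t mask_hi)
{
        uint64_t mask = ((uint64_t)mask_hi << 32) | mask_lo;

        return (int)((mask >> LICENSED_FEATURES_PIO_LBN) & 1);
}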
+
+/* LICENSED_V3_APPS structuredef */
+#define    LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define       LICENSED_V3_APPS_MASK_OFST 0
+#define       LICENSED_V3_APPS_MASK_LEN 8
+#define       LICENSED_V3_APPS_MASK_LO_OFST 0
+#define       LICENSED_V3_APPS_MASK_HI_OFST 4
+#define        LICENSED_V3_APPS_ONLOAD_OFST 0
+#define        LICENSED_V3_APPS_ONLOAD_LBN 0
+#define        LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_PTP_OFST 0
+#define        LICENSED_V3_APPS_PTP_LBN 1
+#define        LICENSED_V3_APPS_PTP_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_OFST 0
+#define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define        LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARSECURE_OFST 0
+#define        LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define        LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define        LICENSED_V3_APPS_PERF_MONITOR_OFST 0
+#define        LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define        LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_OFST 0
+#define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define        LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_OFST 0
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_OFST 0
+#define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define        LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define        LICENSED_V3_APPS_TCP_DIRECT_OFST 0
+#define        LICENSED_V3_APPS_TCP_DIRECT_LBN 8
+#define        LICENSED_V3_APPS_TCP_DIRECT_WIDTH 1
+#define        LICENSED_V3_APPS_LOW_LATENCY_OFST 0
+#define        LICENSED_V3_APPS_LOW_LATENCY_LBN 9
+#define        LICENSED_V3_APPS_LOW_LATENCY_WIDTH 1
+#define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_OFST 0
+#define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_LBN 10
+#define        LICENSED_V3_APPS_SOLARCAPTURE_TAP_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_OFST 0
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_LBN 11
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_40G_WIDTH 1
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_OFST 0
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_LBN 12
+#define        LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_1G_WIDTH 1
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_OFST 0
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_LBN 13
+#define        LICENSED_V3_APPS_SCALEOUT_ONLOAD_WIDTH 1
+#define        LICENSED_V3_APPS_DSHBRD_OFST 0
+#define        LICENSED_V3_APPS_DSHBRD_LBN 14
+#define        LICENSED_V3_APPS_DSHBRD_WIDTH 1
+#define        LICENSED_V3_APPS_SCATRD_OFST 0
+#define        LICENSED_V3_APPS_SCATRD_LBN 15
+#define        LICENSED_V3_APPS_SCATRD_WIDTH 1
+#define       LICENSED_V3_APPS_MASK_LBN 0
+#define       LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define    LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define       LICENSED_V3_FEATURES_MASK_OFST 0
+#define       LICENSED_V3_FEATURES_MASK_LEN 8
+#define       LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define       LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_OFST 0
+#define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define        LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define        LICENSED_V3_FEATURES_PIO_OFST 0
+#define        LICENSED_V3_FEATURES_PIO_LBN 1
+#define        LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define        LICENSED_V3_FEATURES_EVQ_TIMER_OFST 0
+#define        LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define        LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define        LICENSED_V3_FEATURES_CLOCK_OFST 0
+#define        LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define        LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_OFST 0
+#define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define        LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_OFST 0
+#define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define        LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_RX_SNIFF_OFST 0
+#define        LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define        LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define        LICENSED_V3_FEATURES_TX_SNIFF_OFST 0
+#define        LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define        LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_OFST 0
+#define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define        LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_OFST 0
+#define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define        LICENSED_V3_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define       LICENSED_V3_FEATURES_MASK_LBN 0
+#define       LICENSED_V3_FEATURES_MASK_WIDTH 64
+
+/* TX_TIMESTAMP_EVENT structuredef */
+#define    TX_TIMESTAMP_EVENT_LEN 6
+/* lower 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_OFST 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_LBN 0
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_LO_WIDTH 16
+/* Type of TX event, ordinary TX completion, low or high part of TX timestamp
+ */
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_OFST 3
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LEN 1
+/* enum: This is a TX completion event, not a timestamp */
+#define          TX_TIMESTAMP_EVENT_TX_EV_COMPLETION 0x0
+/* enum: This is a TX completion event for a CTPIO transmit. The event format
+ * is the same as for TX_EV_COMPLETION.
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_COMPLETION 0x11
+/* enum: This is the low part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_LO
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_LO 0x12
+/* enum: This is the high part of a TX timestamp for a CTPIO transmission. The
+ * event format is the same as for TX_EV_TSTAMP_HI
+ */
+#define          TX_TIMESTAMP_EVENT_TX_EV_CTPIO_TS_HI 0x13
+/* enum: This is the low part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO 0x51
+/* enum: This is the high part of a TX timestamp event */
+#define          TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI 0x52
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_LBN 24
+#define       TX_TIMESTAMP_EVENT_TX_EV_TYPE_WIDTH 8
+/* upper 16 bits of timestamp data */
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_OFST 4
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LEN 2
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_LBN 32
+#define       TX_TIMESTAMP_EVENT_TSTAMP_DATA_HI_WIDTH 16
+
+/* RSS_MODE structuredef */
+#define    RSS_MODE_LEN 1
+/* The RSS mode for a particular packet type is a value from 0 to 15 which can
+ * be considered as 4 bits selecting which fields are included in the hash. (A
+ * value of 0 effectively disables RSS spreading for the packet type.) The YAML
+ * generation tools require this structure to be a whole number of bytes wide,
+ * but only 4 bits are relevant.
+ */
+#define       RSS_MODE_HASH_SELECTOR_OFST 0
+#define       RSS_MODE_HASH_SELECTOR_LEN 1
+#define        RSS_MODE_HASH_SRC_ADDR_OFST 0
+#define        RSS_MODE_HASH_SRC_ADDR_LBN 0
+#define        RSS_MODE_HASH_SRC_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_DST_ADDR_OFST 0
+#define        RSS_MODE_HASH_DST_ADDR_LBN 1
+#define        RSS_MODE_HASH_DST_ADDR_WIDTH 1
+#define        RSS_MODE_HASH_SRC_PORT_OFST 0
+#define        RSS_MODE_HASH_SRC_PORT_LBN 2
+#define        RSS_MODE_HASH_SRC_PORT_WIDTH 1
+#define        RSS_MODE_HASH_DST_PORT_OFST 0
+#define        RSS_MODE_HASH_DST_PORT_LBN 3
+#define        RSS_MODE_HASH_DST_PORT_WIDTH 1
+#define       RSS_MODE_HASH_SELECTOR_LBN 0
+#define       RSS_MODE_HASH_SELECTOR_WIDTH 8
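
As a worked example of the 4-bit selector: hashing on source and destination address only is 0x3, the full 4-tuple is 0xf, and 0x0 disables spreading for that packet type. The helper name below is illustrative only:

#include <stdint.h>

static uint8_t ex_rss_mode_4tuple(void)
{
        return (1u << RSS_MODE_HASH_SRC_ADDR_LBN) |
               (1u << RSS_MODE_HASH_DST_ADDR_LBN) |
               (1u << RSS_MODE_HASH_SRC_PORT_LBN) |
               (1u << RSS_MODE_HASH_DST_PORT_LBN);     /* == 0xf */
}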
+
+/* CTPIO_STATS_MAP structuredef */
+#define    CTPIO_STATS_MAP_LEN 4
+/* The (function relative) VI number */
+#define       CTPIO_STATS_MAP_VI_OFST 0
+#define       CTPIO_STATS_MAP_VI_LEN 2
+#define       CTPIO_STATS_MAP_VI_LBN 0
+#define       CTPIO_STATS_MAP_VI_WIDTH 16
+/* The target bucket for the VI */
+#define       CTPIO_STATS_MAP_BUCKET_OFST 2
+#define       CTPIO_STATS_MAP_BUCKET_LEN 2
+#define       CTPIO_STATS_MAP_BUCKET_LBN 16
+#define       CTPIO_STATS_MAP_BUCKET_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+#undef MC_CMD_0x50_PRIVILEGE_CTG
+
+#define MC_CMD_0x50_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define    MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define    MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define       MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define       MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define       MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define       MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define       MC_CMD_READ_REGS_OUT_REGS_NUM 73
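
A sketch of reading the dump back (reusing the illustrative ex_le32() helper from the earlier sensor-list sketch); it assumes the 16-byte MASK is a little-endian bitmap in which bit i covers register entry i:

static void ex_dump_mcpu_regs(const uint8_t *resp)
{
        const uint8_t *mask = resp + MC_CMD_READ_REGS_OUT_MASK_OFST;
        unsigned int i;

        for (i = 0; i < MC_CMD_READ_REGS_OUT_REGS_NUM; i++) {
                uint32_t reg;

                if (!(mask[i / 8] & (1u << (i % 8))))
                        continue;       /* no valid value for this entry */

                reg = ex_le32(resp + MC_CMD_READ_REGS_OUT_REGS_OFST +
                              i * MC_CMD_READ_REGS_OUT_REGS_LEN);
                (void)reg;      /* entries follow the MIPS GDB order above */
        }
}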
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+#undef MC_CMD_0x80_PRIVILEGE_CTG
+
+#define MC_CMD_0x80_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define    MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define    MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define    MC_CMD_INIT_EVQ_IN_LENMAX_MCDI2 548
+#define    MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+#define    MC_CMD_INIT_EVQ_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define       MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define        MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define        MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define        MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define        MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_OFST 16
+#define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define        MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define          MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define          MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define          MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define          MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_IN_COUNT_MODE_LEN 4
+/* enum: Counting disabled */
+#define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX packets */
+#define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX packets */
+#define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX packets */
+#define          MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM_MCDI2 64
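
The trailing DMA_ADDR array carries one 8-byte address per 4 KiB page backing the queue, which is where LENMIN 44 (one page) and LENMAX 548 (64 pages) come from. A sketch of sizing the request, assuming 8-byte event descriptors for the arithmetic:

#include <stddef.h>

static size_t ex_init_evq_req_len(unsigned int evq_entries)
{
        unsigned int pages = (evq_entries * 8 + 4095) / 4096;

        if (pages < MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM ||
            pages > MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM)
                return 0;       /* queue size not representable */

        return MC_CMD_INIT_EVQ_IN_LEN(pages);   /* 36 + 8 * pages bytes */
}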
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define    MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_OUT_IRQ_LEN 4
+
+/* MC_CMD_INIT_EVQ_V2_IN msgrequest */
+#define    MC_CMD_INIT_EVQ_V2_IN_LENMIN 44
+#define    MC_CMD_INIT_EVQ_V2_IN_LENMAX 548
+#define    MC_CMD_INIT_EVQ_V2_IN_LENMAX_MCDI2 548
+#define    MC_CMD_INIT_EVQ_V2_IN_LEN(num) (36+8*(num))
+#define    MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_NUM(len) (((len)-36)/8)
+/* Size, in entries */
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_IN_SIZE_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_IN_INSTANCE_LEN 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_OFST 8
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_LOAD_LEN 4
+/* The reload value is ignored in one-shot modes */
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_OFST 12
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_RELOAD_LEN 4
+/* tbd */
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_EVQ_V2_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_LBN 0
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INTERRUPTING_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_LBN 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RPTR_DOS_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_LBN 2
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_INT_ARMD_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_LBN 3
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_LBN 4
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_LBN 5
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_LBN 6
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_USE_TIMER_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LBN 7
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_WIDTH 4
+/* enum: All initialisation flags specified by host. */
+#define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL 0x0
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the lowest latency achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY 0x1
+/* enum: MEDFORD only. Certain initialisation flags specified by host may be
+ * over-ridden by firmware based on licenses and firmware variant in order to
+ * provide the best throughput achievable. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT 0x2
+/* enum: MEDFORD only. Certain initialisation flags may be over-ridden by
+ * firmware based on licenses and firmware variant. See
+ * MC_CMD_INIT_EVQ_V2/MC_CMD_INIT_EVQ_V2_OUT/FLAGS for list of affected flags.
+ */
+#define          MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO 0x3
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_OFST 16
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_LBN 11
+#define        MC_CMD_INIT_EVQ_V2_IN_FLAG_EXT_WIDTH_WIDTH 1
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_OFST 20
+#define       MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_LEN 4
+/* enum: Disabled */
+#define          MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define          MC_CMD_INIT_EVQ_V2_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define          MC_CMD_INIT_EVQ_V2_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define          MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_TARGET_EVQ_LEN 4
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_OFST 24
+#define       MC_CMD_INIT_EVQ_V2_IN_IRQ_NUM_LEN 4
+/* Event Counter Mode. */
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_OFST 28
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_LEN 4
+/* enum: Counting disabled */
+#define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS 0x0
+/* enum: Count RX packets */
+#define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RX 0x1
+/* enum: Count TX packets */
+#define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX packets */
+#define          MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_OFST 32
+#define       MC_CMD_INIT_EVQ_V2_IN_COUNT_THRSHLD_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_OFST 36
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_LO_OFST 36
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_HI_OFST 40
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM_MCDI2 64
+
+/* MC_CMD_INIT_EVQ_V2_OUT msgresponse */
+#define    MC_CMD_INIT_EVQ_V2_OUT_LEN 8
+/* Only valid if INTRFLAG was true */
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_OFST 0
+#define       MC_CMD_INIT_EVQ_V2_OUT_IRQ_LEN 4
+/* Actual configuration applied on the card */
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_OFST 4
+#define       MC_CMD_INIT_EVQ_V2_OUT_FLAGS_LEN 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_OFST 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_LBN 0
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_CUT_THRU_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_OFST 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_LBN 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_OFST 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_LBN 2
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_TX_MERGE_WIDTH 1
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_OFST 4
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_LBN 3
+#define        MC_CMD_INIT_EVQ_V2_OUT_FLAG_RXQ_FORCE_EV_MERGING_WIDTH 1
+
+/* QUEUE_CRC_MODE structuredef */
+#define    QUEUE_CRC_MODE_LEN 1
+#define       QUEUE_CRC_MODE_MODE_LBN 0
+#define       QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define          QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet (FCoE). */
+#define          QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define          QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define          QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define          QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define          QUEUE_CRC_MODE_MPA 0x5
+#define       QUEUE_CRC_MODE_SPARE_LBN 4
+#define       QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+#undef MC_CMD_0x81_PRIVILEGE_CTG
+
+#define MC_CMD_0x81_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_RXQ_IN msgrequest: Legacy RXQ_INIT request. Use extended version
+ * in new code.
+ */
+#define    MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define    MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define    MC_CMD_INIT_RXQ_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+#define    MC_CMD_INIT_RXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_IN_UNUSED_OFST 16
+#define        MC_CMD_INIT_RXQ_IN_UNUSED_LBN 10
+#define        MC_CMD_INIT_RXQ_IN_UNUSED_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+#define       MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
+
+/* MC_CMD_INIT_RXQ_EXT_IN msgrequest: Extended RXQ_INIT with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_RXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_EXT_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_EXT_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_EXT_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_EXT_IN_SNAPSHOT_LENGTH_LEN 4
+
+/* MC_CMD_INIT_RXQ_V3_IN msgrequest */
+#define    MC_CMD_INIT_RXQ_V3_IN_LEN 560
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_V3_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_V3_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_V3_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_V3_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_V3_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_V3_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_V3_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_V3_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_V3_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_V3_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_V3_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_V3_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V3_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_V3_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_V3_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_V3_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_V3_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_V3_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_OFST 548
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_OFST 552
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define       MC_CMD_INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+
+/* MC_CMD_INIT_RXQ_V4_IN msgrequest: INIT_RXQ request with new field required
+ * for systems with a QDMA (currently, Riverhead)
+ */
+#define    MC_CMD_INIT_RXQ_V4_IN_LEN 564
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_V4_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_V4_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_V4_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_V4_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_V4_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_V4_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_V4_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_V4_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_V4_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_V4_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_V4_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_V4_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_V4_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V4_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_V4_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_V4_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_V4_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_V4_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_OFST 548
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_OFST 552
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define       MC_CMD_INIT_RXQ_V4_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define       MC_CMD_INIT_RXQ_V4_IN_V4_DATA_OFST 560
+#define       MC_CMD_INIT_RXQ_V4_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if eight other buffer sizes
+ * are already in use. In future Riverhead versions this limit will go away
+ * and any size will be accepted.
+ */
+#define       MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_OFST 560
+#define       MC_CMD_INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES_LEN 4
+
+/* MC_CMD_INIT_RXQ_V5_IN msgrequest: INIT_RXQ request with ability to request a
+ * different RX packet prefix
+ */
+#define    MC_CMD_INIT_RXQ_V5_IN_LEN 568
+/* Size, in entries */
+#define       MC_CMD_INIT_RXQ_V5_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_RXQ_V5_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ. If DMA_MODE == PACKED_STREAM this must be equal to INSTANCE.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_RXQ_V5_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range.
+ * This field is ignored if DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER or DMA_MODE
+ * == PACKED_STREAM.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_RXQ_V5_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_RXQ_V5_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_RXQ_V5_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_RXQ_V5_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_LBN 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_LBN 2
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_LBN 3
+#define        MC_CMD_INIT_RXQ_V5_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_LBN 7
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_CHAIN_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_LBN 8
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_PREFIX_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_LBN 9
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_LBN 10
+#define        MC_CMD_INIT_RXQ_V5_IN_DMA_MODE_WIDTH 4
+/* enum: One packet per descriptor (for normal networking) */
+#define          MC_CMD_INIT_RXQ_V5_IN_SINGLE_PACKET 0x0
+/* enum: Pack multiple packets into large descriptors (for SolarCapture) */
+#define          MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM 0x1
+/* enum: Pack multiple packets into large descriptors using the format designed
+ * to maximise packet rate. This mode uses 1 "bucket" per descriptor with
+ * multiple fixed-size packet buffers within each bucket. For a full
+ * description see SF-119419-TC. This mode is only supported by "dpdk" datapath
+ * firmware.
+ */
+#define          MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_SUPER_BUFFER 0x2
+/* enum: Deprecated name for EQUAL_STRIDE_SUPER_BUFFER. */
+#define          MC_CMD_INIT_RXQ_V5_IN_EQUAL_STRIDE_PACKED_STREAM 0x2
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_LBN 14
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_SNAPSHOT_MODE_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_LBN 15
+#define        MC_CMD_INIT_RXQ_V5_IN_PACKED_STREAM_BUFF_SIZE_WIDTH 3
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_1M 0x0 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_512K 0x1 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_256K 0x2 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_128K 0x3 /* enum */
+#define          MC_CMD_INIT_RXQ_V5_IN_PS_BUFF_64K 0x4 /* enum */
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_OFST 16
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_LBN 20
+#define        MC_CMD_INIT_RXQ_V5_IN_FLAG_NO_CONT_EV_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_RXQ_V5_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_RXQ_V5_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_RXQ_V5_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_RXQ_V5_IN_DMA_ADDR_NUM 64
+/* Maximum length of packet to receive, if SNAPSHOT_MODE flag is set */
+#define       MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_OFST 540
+#define       MC_CMD_INIT_RXQ_V5_IN_SNAPSHOT_LENGTH_LEN 4
+/* The number of packet buffers that will be contained within each
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket supplied by the driver. This field
+ * is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_OFST 544
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_BUFFERS_PER_BUCKET_LEN 4
+/* The length in bytes of the area in each packet buffer that can be written to
+ * by the adapter. This is used to store the packet prefix and the packet
+ * payload. This length does not include any end padding added by the driver.
+ * This field is ignored unless DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_OFST 548
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_MAX_DMA_LEN_LEN 4
+/* The length in bytes of a single packet buffer within a
+ * EQUAL_STRIDE_SUPER_BUFFER format bucket. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_OFST 552
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_PACKET_STRIDE_LEN 4
+/* The maximum time in nanoseconds that the datapath will be backpressured if
+ * there are no RX descriptors available. If the timeout is reached and there
+ * are still no descriptors then the packet will be dropped. A timeout of 0
+ * means the datapath will never be blocked. This field is ignored unless
+ * DMA_MODE == EQUAL_STRIDE_SUPER_BUFFER.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_OFST 556
+#define       MC_CMD_INIT_RXQ_V5_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT_LEN 4
+/* V4 message data */
+#define       MC_CMD_INIT_RXQ_V5_IN_V4_DATA_OFST 560
+#define       MC_CMD_INIT_RXQ_V5_IN_V4_DATA_LEN 4
+/* Size in bytes of buffers attached to descriptors posted to this queue. Set
+ * to zero if using this message on non-QDMA based platforms. Currently in
+ * Riverhead there is a global limit of eight different buffer sizes across all
+ * active queues. 2KB and 4KB buffers are guaranteed to be available, but a
+ * request for a different buffer size will fail if eight other buffer sizes
+ * are already in use. In future Riverhead versions this limit will go away
+ * and any size will be accepted.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_OFST 560
+#define       MC_CMD_INIT_RXQ_V5_IN_BUFFER_SIZE_BYTES_LEN 4
+/* Prefix id for the RX prefix format to use on packets delivered to this
+ * queue. Zero is always a valid prefix id and means the default prefix
+ * format documented for the platform. Other prefix ids can be obtained by
+ * calling MC_CMD_GET_RX_PREFIX_ID with a requested set of prefix fields.
+ */
+#define       MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_OFST 564
+#define       MC_CMD_INIT_RXQ_V5_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_EXT_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_EXT_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V3_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_V3_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V4_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_V4_OUT_LEN 0
+
+/* MC_CMD_INIT_RXQ_V5_OUT msgresponse */
+#define    MC_CMD_INIT_RXQ_V5_OUT_LEN 0
+
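+/* Illustrative sketch (not part of the generated definitions above): how a
+ * driver might build a minimal INIT_RXQ_V4 request from these fields. It
+ * assumes the MCDI helper macros (MCDI_DECLARE_BUF, MCDI_SET_DWORD,
+ * MCDI_POPULATE_DWORD_2, MCDI_SET_ARRAY_QWORD) and efx_mcdi_rpc() declared
+ * in mcdi.h are visible; the function and parameter names are hypothetical
+ * and n_pages must not exceed MC_CMD_INIT_RXQ_V4_IN_DMA_ADDR_NUM.
+ */
+static inline int example_init_rxq_v4(struct efx_nic *efx, u32 size_entries,
+				       u32 evq, u32 label, u32 instance,
+				       u32 port_id, const dma_addr_t *pages,
+				       unsigned int n_pages)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
+	unsigned int i;
+
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_SIZE, size_entries);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_TARGET_EVQ, evq);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_LABEL, label);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_INSTANCE, instance);
+	/* SINGLE_PACKET DMA mode with the RX prefix and timestamps enabled */
+	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_V4_IN_FLAGS,
+			      INIT_RXQ_V4_IN_FLAG_PREFIX, 1,
+			      INIT_RXQ_V4_IN_FLAG_TIMESTAMP, 1);
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_OWNER_ID, 0); /* physical mode */
+	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_PORT_ID, port_id);
+	/* One 64-bit entry per 4k-aligned page of descriptor ring memory */
+	for (i = 0; i < n_pages; i++)
+		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_V4_IN_DMA_ADDR, i,
+				     pages[i]);
+	/* BUFFER_SIZE_BYTES stays zero: not a QDMA (Riverhead) platform */
+
+	return efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+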
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ */
+#define MC_CMD_INIT_TXQ 0x82
+#undef MC_CMD_0x82_PRIVILEGE_CTG
+
+#define MC_CMD_0x82_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_INIT_TXQ_IN msgrequest: Legacy INIT_TXQ request. Use extended version
+ * in new code.
+ */
+#define    MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define    MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define    MC_CMD_INIT_TXQ_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+#define    MC_CMD_INIT_TXQ_IN_DMA_ADDR_NUM(len) (((len)-28)/8)
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+#define       MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM_MCDI2 124
+
+/* MC_CMD_INIT_TXQ_EXT_IN msgrequest: Extended INIT_TXQ with additional mode
+ * flags
+ */
+#define    MC_CMD_INIT_TXQ_EXT_IN_LEN 544
+/* Size, in entries */
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_OFST 0
+#define       MC_CMD_INIT_TXQ_EXT_IN_SIZE_LEN 4
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_INIT_TXQ_EXT_IN_TARGET_EVQ_LEN 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_OFST 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_LABEL_LEN 4
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_OFST 12
+#define       MC_CMD_INIT_TXQ_EXT_IN_INSTANCE_LEN 4
+/* There will be more flags here. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_OFST 16
+#define       MC_CMD_INIT_TXQ_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_BUFF_MODE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_LBN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_CRC_MODE_WIDTH 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_LBN 8
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TIMESTAMP_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_LBN 9
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_PACER_BYPASS_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_LBN 10
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_LBN 13
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_LBN 14
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_CTPIO_UTHRESH_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_LBN 15
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_M2M_D2C_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_OFST 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_LBN 16
+#define        MC_CMD_INIT_TXQ_EXT_IN_FLAG_DESC_PROXY_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
+#define       MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_LEN 4
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_OFST 24
+#define       MC_CMD_INIT_TXQ_EXT_IN_PORT_ID_LEN 4
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LEN 8
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_LO_OFST 28
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_HI_OFST 32
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MINNUM 1
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM 64
+#define       MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM_MCDI2 64
+/* Flags related to Qbb flow control mode. */
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_OFST 540
+#define       MC_CMD_INIT_TXQ_EXT_IN_QBB_FLAGS_LEN 4
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_LBN 0
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_ENABLE_WIDTH 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_OFST 540
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_LBN 1
+#define        MC_CMD_INIT_TXQ_EXT_IN_QBB_PRIORITY_WIDTH 3
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define    MC_CMD_INIT_TXQ_OUT_LEN 0
+
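+/* Illustrative sketch (not part of the generated definitions above): an
+ * extended INIT_TXQ request enabling TSO v2 and inner (encapsulated)
+ * checksum offload. Same mcdi.h helper assumptions as the RXQ sketch; the
+ * names are hypothetical and n_pages must not exceed
+ * MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM.
+ */
+static inline int example_init_txq_ext(struct efx_nic *efx, u32 size_entries,
+				       u32 evq, u32 label, u32 instance,
+				       u32 port_id, const dma_addr_t *pages,
+				       unsigned int n_pages)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_EXT_IN_LEN);
+	unsigned int i;
+
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_SIZE, size_entries);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_TARGET_EVQ, evq);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_LABEL, label);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_INSTANCE, instance);
+	MCDI_POPULATE_DWORD_3(inbuf, INIT_TXQ_EXT_IN_FLAGS,
+			      INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, 1,
+			      INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, 1,
+			      INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 1);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_OWNER_ID, 0);
+	MCDI_SET_DWORD(inbuf, INIT_TXQ_EXT_IN_PORT_ID, port_id);
+	for (i = 0; i < n_pages; i++)
+		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_EXT_IN_DMA_ADDR, i,
+				     pages[i]);
+
+	return efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+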
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY
+ */
+#define MC_CMD_FINI_EVQ 0x83
+#undef MC_CMD_0x83_PRIVILEGE_CTG
+
+#define MC_CMD_0x83_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define    MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_EVQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define    MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+#undef MC_CMD_0x84_PRIVILEGE_CTG
+
+#define MC_CMD_0x84_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define    MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_RXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define    MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+#undef MC_CMD_0x85_PRIVILEGE_CTG
+
+#define MC_CMD_0x85_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define    MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+#define       MC_CMD_FINI_TXQ_IN_INSTANCE_LEN 4
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define    MC_CMD_FINI_TXQ_OUT_LEN 0
+
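+/* Illustrative sketch (not part of the generated definitions above): tearing
+ * down a channel. As noted for MC_CMD_FINI_EVQ, the RX/TX queues that point
+ * at an event queue must be finalised before the event queue itself, or
+ * FINI_EVQ fails with EBUSY. Assumes the mcdi.h helpers; the function and
+ * parameter names are hypothetical.
+ */
+static inline int example_teardown_channel(struct efx_nic *efx, u32 rxq,
+					   u32 txq, u32 evq)
+{
+	MCDI_DECLARE_BUF(rx_in, MC_CMD_FINI_RXQ_IN_LEN);
+	MCDI_DECLARE_BUF(tx_in, MC_CMD_FINI_TXQ_IN_LEN);
+	MCDI_DECLARE_BUF(ev_in, MC_CMD_FINI_EVQ_IN_LEN);
+	int rc;
+
+	/* DMA queues first ... */
+	MCDI_SET_DWORD(rx_in, FINI_RXQ_IN_INSTANCE, rxq);
+	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, rx_in, sizeof(rx_in),
+			  NULL, 0, NULL);
+	if (rc)
+		return rc;
+	MCDI_SET_DWORD(tx_in, FINI_TXQ_IN_INSTANCE, txq);
+	rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, tx_in, sizeof(tx_in),
+			  NULL, 0, NULL);
+	if (rc)
+		return rc;
+	/* ... then the event queue they point at */
+	MCDI_SET_DWORD(ev_in, FINI_EVQ_IN_INSTANCE, evq);
+	return efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, ev_in, sizeof(ev_in),
+			    NULL, 0, NULL);
+}
+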
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+#undef MC_CMD_0x86_PRIVILEGE_CTG
+
+#define MC_CMD_0x86_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define    MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+#define       MC_CMD_DRIVER_EVENT_IN_EVQ_LEN 4
+/* Bits 0 - 63 of event */
+#define       MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define       MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define       MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define       MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define    MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
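+/* Illustrative sketch (not part of the generated definitions above):
+ * injecting a 64-bit event onto one of the caller's own event queues, for
+ * example to wake an event-processing loop. Assumes the mcdi.h helpers
+ * (MCDI_SET_QWORD fills the DATA_LO/DATA_HI pair); the names are
+ * hypothetical.
+ */
+static inline int example_post_driver_event(struct efx_nic *efx, u32 evq,
+					    u64 data)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, evq);
+	MCDI_SET_QWORD(inbuf, DRIVER_EVENT_IN_DATA, data);
+	return efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+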
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+#undef MC_CMD_0x5b_PRIVILEGE_CTG
+
+#define MC_CMD_0x5b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define    MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define       MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define       MC_CMD_PROXY_CMD_IN_TARGET_LEN 4
+#define        MC_CMD_PROXY_CMD_IN_TARGET_PF_OFST 0
+#define        MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define        MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define        MC_CMD_PROXY_CMD_IN_TARGET_VF_OFST 0
+#define        MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define        MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define          MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+/* MC_CMD_PROXY_CMD_OUT msgresponse */
+#define    MC_CMD_PROXY_CMD_OUT_LEN 0
+
+/* MC_PROXY_STATUS_BUFFER structuredef: Host memory status buffer used to
+ * manage proxied requests
+ */
+#define    MC_PROXY_STATUS_BUFFER_LEN 16
+/* Handle allocated by the firmware for this proxy transaction */
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_OFST 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LEN 4
+/* enum: An invalid handle. */
+#define          MC_PROXY_STATUS_BUFFER_HANDLE_INVALID 0x0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_LBN 0
+#define       MC_PROXY_STATUS_BUFFER_HANDLE_WIDTH 32
+/* The requesting physical function number */
+#define       MC_PROXY_STATUS_BUFFER_PF_OFST 4
+#define       MC_PROXY_STATUS_BUFFER_PF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_PF_LBN 32
+#define       MC_PROXY_STATUS_BUFFER_PF_WIDTH 16
+/* The requesting virtual function number. Set to VF_NULL if the target is a
+ * PF.
+ */
+#define       MC_PROXY_STATUS_BUFFER_VF_OFST 6
+#define       MC_PROXY_STATUS_BUFFER_VF_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_VF_LBN 48
+#define       MC_PROXY_STATUS_BUFFER_VF_WIDTH 16
+/* The target function RID. */
+#define       MC_PROXY_STATUS_BUFFER_RID_OFST 8
+#define       MC_PROXY_STATUS_BUFFER_RID_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_RID_LBN 64
+#define       MC_PROXY_STATUS_BUFFER_RID_WIDTH 16
+/* The status of the proxy as described in MC_CMD_PROXY_COMPLETE. */
+#define       MC_PROXY_STATUS_BUFFER_STATUS_OFST 10
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LEN 2
+#define       MC_PROXY_STATUS_BUFFER_STATUS_LBN 80
+#define       MC_PROXY_STATUS_BUFFER_STATUS_WIDTH 16
+/* If a request is authorized rather than carried out by the host, this is the
+ * elevated privilege mask granted to the requesting function.
+ */
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_OFST 12
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LEN 4
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_LBN 96
+#define       MC_PROXY_STATUS_BUFFER_GRANTED_PRIVILEGES_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_PROXY_CONFIGURE
+ * Enable/disable authorization of MCDI requests from unprivileged functions by
+ * a designated admin function
+ */
+#define MC_CMD_PROXY_CONFIGURE 0x58
+#undef MC_CMD_0x58_PRIVILEGE_CTG
+
+#define MC_CMD_0x58_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_CONFIGURE_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_IN_LEN 108
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_IN_FLAGS_LEN 4
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_IN_REPLY_BLOCK_SIZE_LEN 4
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_IN_NUM_BLOCKS_LEN 4
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define    MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_LEN 4
+#define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_OFST 0
+#define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define        MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size STATUS_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REQUEST_BLOCK_SIZE.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_LEN 4
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * blocks, each of size REPLY_BLOCK_SIZE. This buffer is only needed if the
+ * host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_LEN 4
+/* Applies to all three buffers */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_LEN 4
+/* A bit mask defining which MCDI operations may be proxied */
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+#define       MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_LEN 4
+
+/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
+#define    MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_COMPLETE
+ * Tells FW that a requested proxy operation has either been completed (by
+ * using MC_CMD_PROXY_CMD) or authorized/declined. May only be sent by the
+ * function that enabled proxying/authorization (by using
+ * MC_CMD_PROXY_CONFIGURE).
+ */
+#define MC_CMD_PROXY_COMPLETE 0x5f
+#undef MC_CMD_0x5f_PRIVILEGE_CTG
+
+#define MC_CMD_0x5f_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PROXY_COMPLETE_IN msgrequest */
+#define    MC_CMD_PROXY_COMPLETE_IN_LEN 12
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_OFST 0
+#define       MC_CMD_PROXY_COMPLETE_IN_BLOCK_INDEX_LEN 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_OFST 4
+#define       MC_CMD_PROXY_COMPLETE_IN_STATUS_LEN 4
+/* enum: The operation has been completed by using MC_CMD_PROXY_CMD; the
+ * reply is stored in the REPLY_BUFF.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_COMPLETE 0x0
+/* enum: The operation has been authorized. The originating function may now
+ * try again.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED 0x1
+/* enum: The operation has been declined. */
+#define          MC_CMD_PROXY_COMPLETE_IN_DECLINED 0x2
+/* enum: The authorization failed because the relevant application did not
+ * respond in time.
+ */
+#define          MC_CMD_PROXY_COMPLETE_IN_TIMEDOUT 0x3
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_OFST 8
+#define       MC_CMD_PROXY_COMPLETE_IN_HANDLE_LEN 4
+
+/* MC_CMD_PROXY_COMPLETE_OUT msgresponse */
+#define    MC_CMD_PROXY_COMPLETE_OUT_LEN 0
+
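+/* Illustrative sketch (not part of the generated definitions above): how an
+ * admin function that has enabled proxying with MC_CMD_PROXY_CONFIGURE might
+ * authorize a pending request. It reads the MC_PROXY_STATUS_BUFFER entry for
+ * the block directly from the host status buffer and then replies with
+ * MC_CMD_PROXY_COMPLETE. Assumes the mcdi.h helpers; the names are
+ * hypothetical.
+ */
+static inline int example_proxy_authorize(struct efx_nic *efx,
+					  const u8 *status_buf,
+					  u32 block_size, u32 block_index)
+{
+	const u8 *entry = status_buf + (size_t)block_index * block_size;
+	u32 handle = le32_to_cpup((const __le32 *)
+				  (entry + MC_PROXY_STATUS_BUFFER_HANDLE_OFST));
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PROXY_COMPLETE_IN_LEN);
+
+	MCDI_SET_DWORD(inbuf, PROXY_COMPLETE_IN_BLOCK_INDEX, block_index);
+	MCDI_SET_DWORD(inbuf, PROXY_COMPLETE_IN_STATUS,
+		       MC_CMD_PROXY_COMPLETE_IN_AUTHORIZED);
+	MCDI_SET_DWORD(inbuf, PROXY_COMPLETE_IN_HANDLE, handle);
+	return efx_mcdi_rpc(efx, MC_CMD_PROXY_COMPLETE, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+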
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+#undef MC_CMD_0x87_PRIVILEGE_CTG
+
+#define MC_CMD_0x87_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define    MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_LEN 4
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_LEN 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define    MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_LEN 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_LEN 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+#define       MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+#undef MC_CMD_0x88_PRIVILEGE_CTG
+
+#define MC_CMD_0x88_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 268
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX_MCDI2 268
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_NUM(len) (((len)-12)/8)
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_LEN 4
+/* ID */
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Num entries */
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+/* Buffer table entry address */
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 32
+#define       MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM_MCDI2 32
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define    MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+#undef MC_CMD_0x89_PRIVILEGE_CTG
+
+#define MC_CMD_0x89_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define    MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+#define       MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_LEN 4
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define    MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
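+/* Illustrative sketch (not part of the generated definitions above):
+ * allocating a chunk of buffer table entries and programming each entry with
+ * the address of a 4k host page. Assumes the mcdi.h helpers (including
+ * MCDI_DWORD for reading response fields); the names are hypothetical and
+ * n_pages must not exceed MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM.
+ */
+static inline int example_buftbl_map(struct efx_nic *efx, u32 owner_id,
+				     const dma_addr_t *pages,
+				     unsigned int n_pages, u32 *chunk_handle)
+{
+	MCDI_DECLARE_BUF(alloc_in, MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN);
+	MCDI_DECLARE_BUF(alloc_out, MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN);
+	MCDI_DECLARE_BUF(prog_in, MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX);
+	size_t outlen;
+	unsigned int i;
+	int rc;
+
+	MCDI_SET_DWORD(alloc_in, ALLOC_BUFTBL_CHUNK_IN_OWNER, owner_id);
+	/* 4k pages; legal page sizes are hardware-specific */
+	MCDI_SET_DWORD(alloc_in, ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE, 4096);
+	rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_BUFTBL_CHUNK, alloc_in,
+			  sizeof(alloc_in), alloc_out, sizeof(alloc_out),
+			  &outlen);
+	if (rc)
+		return rc;
+	*chunk_handle = MCDI_DWORD(alloc_out, ALLOC_BUFTBL_CHUNK_OUT_HANDLE);
+
+	MCDI_SET_DWORD(prog_in, PROGRAM_BUFTBL_ENTRIES_IN_HANDLE,
+		       *chunk_handle);
+	MCDI_SET_DWORD(prog_in, PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID,
+		       MCDI_DWORD(alloc_out, ALLOC_BUFTBL_CHUNK_OUT_ID));
+	MCDI_SET_DWORD(prog_in, PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES, n_pages);
+	for (i = 0; i < n_pages; i++)
+		MCDI_SET_ARRAY_QWORD(prog_in, PROGRAM_BUFTBL_ENTRIES_IN_ENTRY,
+				     i, pages[i]);
+	return efx_mcdi_rpc(efx, MC_CMD_PROGRAM_BUFTBL_ENTRIES, prog_in,
+			    MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n_pages),
+			    NULL, 0, NULL);
+}
+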
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+#undef MC_CMD_0x8a_PRIVILEGE_CTG
+
+#define MC_CMD_0x8a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define    MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_IN_OP_LEN 4
+/* enum: single-recipient filter insert */
+#define          MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define          MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define          MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define          MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define          MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define        MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_OFST 40
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_OFST 40
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_IN_FWDEF0_LEN 4
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+#define       MC_CMD_FILTER_OP_IN_FWDEF1_LEN 4
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
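+/* Illustrative sketch (not part of the original file): one plausible way a
+ * driver could issue a basic MC_CMD_FILTER_OP_IN insert request using the
+ * MCDI helpers assumed to be available from mcdi.h (MCDI_DECLARE_BUF,
+ * MCDI_SET_DWORD, MCDI_PTR, MCDI_QWORD, efx_mcdi_rpc). The function name is
+ * hypothetical and error handling is minimal.
+ */
+static inline int example_insert_dst_mac_filter(struct efx_nic *efx,
+                                                u32 port_id, const u8 *dst_mac,
+                                                u32 rx_queue, u64 *handle_out)
+{
+        MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+        MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+        size_t outlen;
+        int rc;
+
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP, MC_CMD_FILTER_OP_IN_OP_INSERT);
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, port_id);
+        /* match on destination MAC only; the MATCH_*_LBN values are bit
+         * numbers within MATCH_FIELDS
+         */
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+                       1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN);
+        memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_MAC), dst_mac,
+               MC_CMD_FILTER_OP_IN_DST_MAC_LEN);
+        /* deliver to a single host receive queue */
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+                       MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, rx_queue);
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+                       MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+
+        rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+                          outbuf, sizeof(outbuf), &outlen);
+        if (rc)
+                return rc;
+        if (outlen < MC_CMD_FILTER_OP_OUT_LEN)
+                return -EIO;
+        *handle_out = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+        return 0;
+}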
+/* MC_CMD_FILTER_OP_EXT_IN msgrequest: Extension to MC_CMD_FILTER_OP_IN to
+ * include handling of VXLAN/NVGRE encapsulated frame filtering (which is
+ * supported on Medford only).
+ */
+#define    MC_CMD_FILTER_OP_EXT_IN_LEN 172
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_IN_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_EXT_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_EXT_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_EXT_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_EXT_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_EXT_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_EXT_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_OFST 40
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_OFST 40
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_EXT_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_EXT_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_EXT_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_EXT_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_EXT_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_EXT_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_EXT_IN_VNI_OR_VSID_LEN 4
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_OFST 72
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_EXT_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_EXT_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_EXT_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_EXT_IN_IFRM_DST_IP_LEN 16
+
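+/* Illustrative sketch (not part of the original file): the MATCH_*_LBN values
+ * above are bit positions within the MATCH_FIELDS dword, so a match-criteria
+ * mask is built by OR-ing (1 << LBN) terms. The helper name is hypothetical.
+ */
+static inline u32 example_encap_inner_dst_mac_match(void)
+{
+        /* match on the outer VNI/VSID plus the inner frame's destination MAC */
+        return (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_VNI_OR_VSID_LBN) |
+               (1u << MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_DST_MAC_LBN);
+}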
+/* MC_CMD_FILTER_OP_V3_IN msgrequest: FILTER_OP extension to support additional
+ * filter actions for Intel's DPDK (Data Plane Development Kit, dpdk.org) via
+ * its rte_flow API. This extension is only useful with the sfc_efx driver
+ * included as part of DPDK, used in conjunction with the dpdk datapath
+ * firmware variant.
+ */
+#define    MC_CMD_FILTER_OP_V3_IN_LEN 180
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_V3_IN_OP_OFST 0
+#define       MC_CMD_FILTER_OP_V3_IN_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* filter handle (for remove / unsubscribe operations) */
+#define       MC_CMD_FILTER_OP_V3_IN_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_V3_IN_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_V3_IN_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_V3_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_PORT_ID_OFST 12
+#define       MC_CMD_FILTER_OP_V3_IN_PORT_ID_LEN 4
+/* fields to include in match criteria */
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_OFST 16
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_FIELDS_LEN 4
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_LBN 0
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_LBN 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_LBN 2
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_LBN 3
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_LBN 4
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_LBN 5
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_LBN 6
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_LBN 7
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_LBN 8
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_LBN 9
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_LBN 10
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_LBN 11
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_VNI_OR_VSID_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_LBN 12
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_LBN 13
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_IP_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_LBN 14
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_LBN 15
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_SRC_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_LBN 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_LBN 17
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_DST_PORT_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_LBN 18
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_LBN 19
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_INNER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_LBN 20
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_LBN 21
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_IP_PROTO_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_LBN 22
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF0_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_LBN 23
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_FWDEF1_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN 24
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN 25
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_OFST 16
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define        MC_CMD_FILTER_OP_V3_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define       MC_CMD_FILTER_OP_V3_IN_RX_DEST_OFST 20
+#define       MC_CMD_FILTER_OP_V3_IN_RX_DEST_LEN 4
+/* enum: drop packets */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_DEST_MC 0x2
+/* enum: loop back to TXDP 0 */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX0 0x3
+/* enum: loop back to TXDP 1 */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define       MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_OFST 24
+#define       MC_CMD_FILTER_OP_V3_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_FILTER_OP_V3_IN_RX_MODE_OFST 28
+#define       MC_CMD_FILTER_OP_V3_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define          MC_CMD_FILTER_OP_V3_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC.
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_OFST 32
+#define       MC_CMD_FILTER_OP_V3_IN_RX_CONTEXT_LEN 4
+/* transmit domain (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_OFST 36
+#define       MC_CMD_FILTER_OP_V3_IN_TX_DOMAIN_LEN 4
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_TX_DEST_OFST 40
+#define       MC_CMD_FILTER_OP_V3_IN_TX_DEST_LEN 4
+/* enum: request default behaviour (based on filter type) */
+#define          MC_CMD_FILTER_OP_V3_IN_TX_DEST_DEFAULT 0xffffffff
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_OFST 40
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_LBN 0
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_MAC_WIDTH 1
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_OFST 40
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_LBN 1
+#define        MC_CMD_FILTER_OP_V3_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_MAC_OFST 44
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_PORT_OFST 50
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_DST_MAC_OFST 52
+#define       MC_CMD_FILTER_OP_V3_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_DST_PORT_OFST 58
+#define       MC_CMD_FILTER_OP_V3_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_OFST 60
+#define       MC_CMD_FILTER_OP_V3_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_OFST 62
+#define       MC_CMD_FILTER_OP_V3_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_OFST 64
+#define       MC_CMD_FILTER_OP_V3_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define       MC_CMD_FILTER_OP_V3_IN_IP_PROTO_OFST 66
+#define       MC_CMD_FILTER_OP_V3_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define       MC_CMD_FILTER_OP_V3_IN_FWDEF0_OFST 68
+#define       MC_CMD_FILTER_OP_V3_IN_FWDEF0_LEN 4
+/* VNI (for VXLAN/Geneve, when IP protocol is UDP) or VSID (for NVGRE, when IP
+ * protocol is GRE) to match (as bytes in network order; set last byte to 0 for
+ * VXLAN/NVGRE, or 1 for Geneve)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_OFST 72
+#define       MC_CMD_FILTER_OP_V3_IN_VNI_OR_VSID_LEN 4
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_OFST 72
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_OFST 72
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_WIDTH 8
+/* enum: Match VXLAN traffic with this VNI */
+#define          MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_VXLAN 0x0
+/* enum: Match Geneve traffic with this VNI */
+#define          MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_GENEVE 0x1
+/* enum: Reserved for experimental development use */
+#define          MC_CMD_FILTER_OP_V3_IN_VNI_TYPE_EXPERIMENTAL 0xfe
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_OFST 72
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_LBN 0
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_VALUE_WIDTH 24
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_OFST 72
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_LBN 24
+#define        MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_WIDTH 8
+/* enum: Match NVGRE traffic with this VSID */
+#define          MC_CMD_FILTER_OP_V3_IN_VSID_TYPE_NVGRE 0x0
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_IP_OFST 76
+#define       MC_CMD_FILTER_OP_V3_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_DST_IP_OFST 92
+#define       MC_CMD_FILTER_OP_V3_IN_DST_IP_LEN 16
+/* VXLAN/NVGRE inner frame source MAC address to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_OFST 108
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_MAC_LEN 6
+/* VXLAN/NVGRE inner frame source port to match (as bytes in network order) */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_OFST 114
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_PORT_LEN 2
+/* VXLAN/NVGRE inner frame destination MAC address to match (as bytes in
+ * network order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_OFST 116
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_MAC_LEN 6
+/* VXLAN/NVGRE inner frame destination port to match (as bytes in network
+ * order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_OFST 122
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_PORT_LEN 2
+/* VXLAN/NVGRE inner frame Ethernet type to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_OFST 124
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_ETHER_TYPE_LEN 2
+/* VXLAN/NVGRE inner frame Inner VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_OFST 126
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_INNER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame Outer VLAN tag to match (as bytes in network order)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_OFST 128
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_OUTER_VLAN_LEN 2
+/* VXLAN/NVGRE inner frame IP protocol to match (in low byte; set high byte to
+ * 0)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_OFST 130
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_IP_PROTO_LEN 2
+/* VXLAN/NVGRE inner frame Firmware defined register 0 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_OFST 132
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF0_LEN 4
+/* VXLAN/NVGRE inner frame Firmware defined register 1 to match (reserved; set
+ * to 0)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_OFST 136
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_FWDEF1_LEN 4
+/* VXLAN/NVGRE inner frame source IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_OFST 140
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_SRC_IP_LEN 16
+/* VXLAN/NVGRE inner frame destination IP address to match (as bytes in network
+ * order; set last 12 bytes to 0 for IPv4 address)
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_OFST 156
+#define       MC_CMD_FILTER_OP_V3_IN_IFRM_DST_IP_LEN 16
+/* Set an action for all packets matching this filter. The DPDK driver and dpdk
+ * f/w variant use their own specific delivery structures, which are documented
+ * in the DPDK Firmware Driver Interface (SF-119419-TC). Requesting anything
+ * other than MATCH_ACTION_NONE when the NIC is running another f/w variant
+ * will cause the filter insertion to fail with ENOTSUP.
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_OFST 172
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_LEN 4
+/* enum: do nothing extra */
+#define          MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_NONE 0x0
+/* enum: Set the match flag in the packet prefix for packets matching the
+ * filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "FLAG" action.
+ */
+#define          MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_FLAG 0x1
+/* enum: Insert MATCH_MARK_VALUE into the packet prefix for packets matching
+ * the filter (only with dpdk firmware, otherwise fails with ENOTSUP). Used to
+ * support the DPDK rte_flow "MARK" action.
+ */
+#define          MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK 0x2
+/* the mark value for MATCH_ACTION_MARK. Requesting a value larger than the
+ * maximum (obtained from MC_CMD_GET_CAPABILITIES_V5/FILTER_ACTION_MARK_MAX)
+ * will cause the filter insertion to fail with EINVAL.
+ */
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_OFST 176
+#define       MC_CMD_FILTER_OP_V3_IN_MATCH_MARK_VALUE_LEN 4
+
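+/* Illustrative sketch (not part of the original file): the two fields V3 adds
+ * on top of the EXT layout are MATCH_ACTION and MATCH_MARK_VALUE, so a MARK
+ * request is an EXT request with these appended. Assumes MCDI_SET_DWORD from
+ * mcdi.h; the function name is hypothetical.
+ */
+static inline void example_set_mark_action(efx_dword_t *inbuf, u32 mark)
+{
+        /* only honoured by the dpdk firmware variant; other variants fail the
+         * insertion with ENOTSUP, and marks above FILTER_ACTION_MARK_MAX fail
+         * with EINVAL
+         */
+        MCDI_SET_DWORD(inbuf, FILTER_OP_V3_IN_MATCH_ACTION,
+                       MC_CMD_FILTER_OP_V3_IN_MATCH_ACTION_MARK);
+        MCDI_SET_DWORD(inbuf, FILTER_OP_V3_IN_MATCH_MARK_VALUE, mark);
+}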
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
+/* enum: guaranteed invalid filter handle (low 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_LO_INVALID 0xffffffff
+/* enum: guaranteed invalid filter handle (high 32 bits) */
+#define          MC_CMD_FILTER_OP_OUT_HANDLE_HI_INVALID 0xffffffff
+
+/* MC_CMD_FILTER_OP_EXT_OUT msgresponse */
+#define    MC_CMD_FILTER_OP_EXT_OUT_LEN 12
+/* identifies the type of operation requested */
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_OFST 0
+#define       MC_CMD_FILTER_OP_EXT_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_EXT_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LEN 8
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_LO_OFST 4
+#define       MC_CMD_FILTER_OP_EXT_OUT_HANDLE_HI_OFST 8
+/*            Enum values, see field(s): */
+/*               MC_CMD_FILTER_OP_OUT/HANDLE */
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+#undef MC_CMD_0xe4_PRIVILEGE_CTG
+
+#define MC_CMD_0xe4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_LEN 4
+/* enum: read the list of supported RX filter matches */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+/* enum: read flags indicating restrictions on filter insertion for the calling
+ * client
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
+/* enum: read the list of supported matches for the encapsulation detection
+ * rules inserted by MC_CMD_VNIC_ENCAP_RULE_ADD. (ef100 and later)
+ */
+#define          MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES 0x5
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define       MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
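+/* Illustrative sketch (not part of the original file): the response above is
+ * variable length, so the number of array entries comes from the returned
+ * length via the SUPPORTED_MATCHES_NUM(len) macro. Assumes MCDI_DECLARE_BUF,
+ * MCDI_SET_DWORD and efx_mcdi_rpc from mcdi.h; the function name is
+ * hypothetical.
+ */
+static inline int example_count_rx_matches(struct efx_nic *efx)
+{
+        MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+        MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+        size_t outlen;
+        int rc;
+
+        MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+                       MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+        rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO, inbuf,
+                          sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+        if (rc)
+                return rc;
+        if (outlen < MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN)
+                return -EIO;
+        /* e.g. a 24-byte response describes (24 - 8) / 4 = 4 match types */
+        return MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_NUM(outlen);
+}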
+/* MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_LEN 8
+/* identifies the type of operation requested */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* bitfield of filter insertion restrictions */
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_RESTRICTION_FLAGS_LEN 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_OFST 4
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_LBN 0
+#define        MC_CMD_GET_PARSER_DISP_RESTRICTIONS_OUT_DST_IP_MCAST_ONLY_WIDTH 1
+
+/* MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT msgresponse: This response is
+ * returned if a MC_CMD_GET_PARSER_DISP_INFO_IN request is sent with OP value
+ * OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES. It contains information about the
+ * supported match types that can be used in the encapsulation detection rules
+ * inserted by MC_CMD_VNIC_ENCAP_RULE_ADD.
+ */
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMIN 8
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_LEN(num) (8+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_NUM(len) (((len)-8)/4)
+/* The op code OP_GET_SUPPORTED_VNIC_ENCAP_MATCHES is returned. */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_OP_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_NUM_SUPPORTED_MATCHES_LEN 4
+/* array of supported match types (valid MATCH_FLAGS values for
+ * MC_CMD_VNIC_ENCAP_RULE_ADD) sorted in decreasing priority order
+ */
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_OFST 8
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM 61
+#define       MC_CMD_GET_PARSER_DISP_VNIC_ENCAP_MATCHES_OUT_SUPPORTED_MATCHES_MAXNUM_MCDI2 253
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code. Note that although this command is in the
+ * Admin privilege group, in tamperproof adapters, only read operations are
+ * permitted.
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+#undef MC_CMD_0xe5_PRIVILEGE_CTG
+
+#define MC_CMD_0xe5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define    MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_IN_TARGET_LEN 4
+/* enum: RX dispatcher CPU */
+#define          MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define          MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine (with original metadata format). Deprecated; used only
+ * by cmdclient as a fallback for very old Huntington firmware, and not
+ * supported in firmware beyond v6.4.0.1005. Use LUE_VERSIONED_METADATA
+ * instead.
+ */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* enum: Lookup engine (with requested metadata format) */
+#define          MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define          MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define          MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define          MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
+/* identifies the type of operation requested */
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+#define       MC_CMD_PARSER_DISP_RW_IN_OP_LEN 4
+/* enum: Read a word of DICPU DMEM or a LUE entry */
+#define          MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: Write a word of DICPU DMEM or a LUE entry. Not permitted on
+ * tamperproof adapters.
+ */
+#define          MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: Read-modify-write a word of DICPU DMEM (not valid for LUE). Not
+ * permitted on tamperproof adapters.
+ */
+#define          MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_ADDRESS_LEN 4
+/* selector (for MISC_STATE target) */
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+#define       MC_CMD_PARSER_DISP_RW_IN_SELECTOR_LEN 4
+/* enum: Port to datapath mapping */
+#define          MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
+/* value to write (for DMEM writes) */
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_LEN 4
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_LEN 4
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+#define       MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_LEN 4
+/* metadata format (for LUE reads using LUE_VERSIONED_METADATA) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_READ_METADATA_VERSION_LEN 4
+/* value to write (for LUE writes) */
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define       MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define    MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_LEN 4
+/* value read (for LUE reads) */
+#define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define       MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define       MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define       MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define       MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define       MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define          MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define          MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
+
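+/* Illustrative sketch (not part of the original file): for the DMEM
+ * read-modify-write above the firmware computes new = (old & mask) ^ value,
+ * so the AND mask and XOR value for a set/clear request can be derived as
+ * below. The helper name is hypothetical.
+ */
+static inline void example_rmw_operands(u32 set_bits, u32 clear_bits,
+                                        u32 *and_mask, u32 *xor_value)
+{
+        /* drop every bit being forced, keep the rest of the old word */
+        *and_mask = ~(set_bits | clear_bits);
+        /* a forced bit reads back as 1 only if it is in set_bits */
+        *xor_value = set_bits;
+}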
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+#undef MC_CMD_0xb6_PRIVILEGE_CTG
+
+#define MC_CMD_0xb6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define    MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define    MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define       MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define       MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define    MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+#define       MC_CMD_SET_PF_COUNT_IN_PF_COUNT_LEN 4
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define    MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+#undef MC_CMD_0xb8_PRIVILEGE_CTG
+
+#define MC_CMD_0xb8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define    MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define    MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+#define       MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+#undef MC_CMD_0xb9_PRIVILEGE_CTG
+
+#define MC_CMD_0xb9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define    MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+#define       MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_LEN 4
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define    MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+#undef MC_CMD_0x8b_PRIVILEGE_CTG
+
+#define MC_CMD_0x8b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define    MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_LEN 4
+/* The maximum number of VIs that would be useful */
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+#define       MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_LEN 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse: Huntington-compatible VI_ALLOC request.
+ * Use extended version in new code.
+ */
+#define    MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_OUT_VI_BASE_LEN 4
+
+/* MC_CMD_ALLOC_VIS_EXT_OUT msgresponse */
+#define    MC_CMD_ALLOC_VIS_EXT_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_OFST 4
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_ALLOC_VIS_EXT_OUT_VI_SHIFT_LEN 4
+
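+/* Illustrative sketch (not part of the original file): a caller that wants
+ * VI_SHIFT must cope with the shorter Huntington-compatible response, in
+ * which case the shift defaults to 0 as noted above. Assumes MCDI_DECLARE_BUF,
+ * MCDI_SET_DWORD, MCDI_DWORD and efx_mcdi_rpc from mcdi.h; the function name
+ * is hypothetical.
+ */
+static inline int example_alloc_vis(struct efx_nic *efx, u32 min_vis,
+                                    u32 max_vis, u32 *base, u32 *count,
+                                    u32 *shift)
+{
+        MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+        MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
+        size_t outlen;
+        int rc;
+
+        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+        MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+        rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+                          outbuf, sizeof(outbuf), &outlen);
+        if (rc)
+                return rc;
+        if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+                return -EIO;
+        *count = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+        *base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+        *shift = outlen >= MC_CMD_ALLOC_VIS_EXT_OUT_LEN ?
+                 MCDI_DWORD(outbuf, ALLOC_VIS_EXT_OUT_VI_SHIFT) : 0;
+        return 0;
+}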
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+#undef MC_CMD_0x8c_PRIVILEGE_CTG
+
+#define MC_CMD_0x8c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define    MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define    MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+#undef MC_CMD_0xba_PRIVILEGE_CTG
+
+#define MC_CMD_0xba_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define    MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define    MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_LEN 4
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define       MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_OFST 8
+#define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define        MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous. */
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
+#define       MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_LEN 4
+
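+/* Illustrative sketch (not part of the original file): decoding the FLAGS
+ * bitfield above using the plain LBN constant. Assumes MCDI_DWORD from mcdi.h
+ * applied to a GET_SRIOV_CFG response buffer; the function name is
+ * hypothetical.
+ */
+static inline bool example_sriov_vf_enabled(efx_dword_t *outbuf)
+{
+        return MCDI_DWORD(outbuf, GET_SRIOV_CFG_OUT_FLAGS) &
+               (1u << MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN);
+}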
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+#undef MC_CMD_0xbb_PRIVILEGE_CTG
+
+#define MC_CMD_0xbb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define    MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_LEN 4
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_LEN 4
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define       MC_CMD_SET_SRIOV_CFG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_OFST 8
+#define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define        MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_LEN 4
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+#define       MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_LEN 4
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define    MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about number of VI's and base VI number allocated to this
+ * function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+#undef MC_CMD_0x8d_PRIVILEGE_CTG
+
+#define MC_CMD_0x8d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define    MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define    MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 12
+/* The number of VIs allocated on this function */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_LEN 4
+/* Function's port vi_shift value (always 0 on Huntington) */
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_OFST 8
+#define       MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_SHIFT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+#undef MC_CMD_0x8e_PRIVILEGE_CTG
+
+#define MC_CMD_0x8e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define    MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_LEN 4
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define    MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define       MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define       MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define       MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_LEN 4
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_OFST 28
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_OFST 28
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_OFST 28
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_OFST 56
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_OFST 56
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_OFST 56
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_OFST 56
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_OFST 56
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define       MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_OFST 88
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_OFST 88
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_OFST 88
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_OFST 88
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define        MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
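The *_LBN/*_WIDTH pairs above locate sub-fields inside a containing dword (VI_EV_META at offset 28, for example). A hedged sketch of the usual extraction pattern; SUBFIELD() is an illustrative macro, not defined by this header:

#include <stdint.h>

/* Illustrative: extract a sub-field from its containing dword given the
 * field's *_LBN/*_WIDTH macros (fields narrower than 32 bits only). */
#define SUBFIELD(dword, field) \
        (((dword) >> field ## _LBN) & ((1u << field ## _WIDTH) - 1u))

struct vi_ev_meta {
        uint32_t bufs_base;
        uint32_t bufs_npages;
        uint32_t wkup_ref;
};

/* 'dword' is VI_EV_META read as a little-endian dword from offset 28. */
static struct vi_ev_meta parse_vi_ev_meta(uint32_t dword)
{
        struct vi_ev_meta m = {
                .bufs_base   = SUBFIELD(dword,
                        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE),
                .bufs_npages = SUBFIELD(dword,
                        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES),
                .wkup_ref    = SUBFIELD(dword,
                        MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF),
        };
        return m;
}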
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+#undef MC_CMD_0x8f_PRIVILEGE_CTG
+
+#define MC_CMD_0x8f_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define    MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define    MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+#undef MC_CMD_0x90_PRIVILEGE_CTG
+
+#define MC_CMD_0x90_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define    MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define    MC_CMD_FREE_PIOBUF_OUT_LEN 0
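ALLOC_PIOBUF and FREE_PIOBUF above form a simple pair: the 4-byte handle returned by the former is the only input to the latter. A small sketch (buffers are assumed to hold the raw little-endian MCDI payloads; the transport call is omitted):

#include <stdint.h>
#include <string.h>

/* Carry the handle from an ALLOC_PIOBUF response into a FREE_PIOBUF request.
 * Both fields are 4-byte little-endian values at offset 0, so a byte copy
 * preserves the wire format with no endian conversion needed. */
static void piobuf_handle_to_free_req(const uint8_t *alloc_outbuf,
                                      uint8_t *free_inbuf)
{
        memcpy(free_inbuf + MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST,
               alloc_outbuf + MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST,
               MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_LEN);
}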
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+#undef MC_CMD_0xb0_PRIVILEGE_CTG
+
+#define MC_CMD_0xb0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define    MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define    MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_LEN 4
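Because DATA_OFST is 0, the whole 4-byte response above can be read as one little-endian dword and picked apart with the *_OFST/*_LBN values. A minimal sketch; the helper names are illustrative:

#include <stdbool.h>
#include <stdint.h>

/* 'data' is the whole 4-byte response read as one little-endian dword
 * (DATA_OFST is 0, so the byte-wide TPH tags and the flag bits all live
 * in this single word). */
static bool vi_tlp_relaxed_ordering(uint32_t data)
{
        return (data >> MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN) & 1u;
}

static uint8_t vi_tlp_tph_tag1_rx(uint32_t data)
{
        /* Byte-wide fields: shift by 8 * OFST within the same dword. */
        return (data >> (8 * MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST)) & 0xff;
}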
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+#undef MC_CMD_0xb1_PRIVILEGE_CTG
+
+#define MC_CMD_0xb1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define    MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+#define       MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_LEN 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define    MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+#undef MC_CMD_0xbc_PRIVILEGE_CTG
+
+#define MC_CMD_0xbc_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/* enum: MISC. */
+#define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define          MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define    MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_LEN 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_OFST 4
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define        MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
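Which of the overlapping bitfield layouts above applies to TLP_INFO_WORD depends on the TLP_GLOBAL_CATEGORY echoed back in the response. A hedged sketch of that dispatch (MISC and TPH_TYPE decoding is elided for brevity):

#include <stdint.h>
#include <stdio.h>

/* 'info' is TLP_INFO_WORD read as a little-endian dword from offset 4. */
static void decode_tlp_info(uint32_t category, uint32_t info)
{
        switch (category) {
        case MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO:
                printf("IDO: dl=%u tx=%u ev=%u rx=%u\n",
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN) & 1,
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN) & 1,
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN) & 1,
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN) & 1);
                break;
        case MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO:
                printf("RO: rxdma=%u txdma=%u dl=%u\n",
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN) & 1,
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN) & 1,
                       (info >> MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN) & 1);
                break;
        default:
                /* MISC and TPH_TYPE decoding elided for brevity. */
                break;
        }
}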
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+#undef MC_CMD_0xbd_PRIVILEGE_CTG
+
+#define MC_CMD_0xbd_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define       MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_LEN 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_OFST 4
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define        MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define    MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+#undef MC_CMD_0x91_PRIVILEGE_CTG
+
+#define MC_CMD_0x91_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+#define    MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_NUM(len) (((len)-16)/4)
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_LEN 4
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_LEN 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LEN 4
+/* enum: Last chunk, containing checksum rather than data */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define          MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_LEN 4
+/* Data for this chunk */
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+#define       MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM_MCDI2 251
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define    MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_LEN 4
+/* Extra status information */
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+#define       MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_LEN 4
+/* enum: Code download OK, completed. */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define          MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
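A hedged sketch of the chunk framing implied by the request layout above: one builder for a single PHASE_IMEMS/PHASE_VECTORS message plus the "simple 32-bit sum" used for the final chunk. put_le32() is an illustrative helper, and the actual MCDI transport and phase sequencing (RESET, IMEMS, VECTORS, READY) are omitted:

#include <stddef.h>
#include <stdint.h>

/* Illustrative little-endian store, not provided by this header. */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst + 0] = v & 0xff;
        buf[ofst + 1] = (v >> 8) & 0xff;
        buf[ofst + 2] = (v >> 16) & 0xff;
        buf[ofst + 3] = (v >> 24) & 0xff;
}

/* Frame one download request; returns the request length to pass to the
 * MCDI transport.  'words'/'num' carry the chunk payload as 32-bit words.
 * For the final chunk of a target, pass chunk_id == CHUNK_ID_LAST and a
 * single word holding the running 32-bit sum of the data sent so far. */
static size_t satellite_chunk_build(uint8_t *inbuf, uint32_t phase,
                                    uint32_t target, uint32_t chunk_id,
                                    const uint32_t *words, unsigned int num)
{
        unsigned int i;

        put_le32(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST, phase);
        put_le32(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST, target);
        put_le32(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST, chunk_id);
        put_le32(inbuf, MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST, num * 4);
        for (i = 0; i < num; i++)
                put_le32(inbuf,
                         MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST + 4 * i,
                         words[i]);
        return MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num);
}

/* "a simple 32-bit sum" of the transferred data, per the comment above. */
static uint32_t satellite_checksum(const uint32_t *words, size_t n)
{
        uint32_t sum = 0;

        while (n--)
                sum += *words++;
        return sum;
}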
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and is intended
+ * to report inherent device capabilities as opposed to the current NVRAM
+ * config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+#undef MC_CMD_0xbe_PRIVILEGE_CTG
+
+#define MC_CMD_0xbe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define    MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_LEN 4
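On the consuming side, the response above usually boils down to testing a few FLAGS1 bits and comparing the 16-bit DPCPU firmware ids against the enum values. A minimal sketch, assuming the caller has already read flags1 (offset 0) and the RX firmware id (offset 4) from the response as little-endian values:

#include <stdbool.h>
#include <stdint.h>

/* Test one FLAGS1 capability bit by its *_LBN value. */
static bool cap_flag(uint32_t flags1, unsigned int lbn)
{
        return (flags1 >> lbn) & 1u;
}

static bool supports_tx_tso(uint32_t flags1)
{
        return cap_flag(flags1, MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN);
}

/* 'rx_fw_id' is the 16-bit RX_DPCPU_FW_ID field from offset 4. */
static bool rxdp_is_low_latency(uint16_t rx_fw_id)
{
        return rx_fw_id == MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY;
}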
+
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define    MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V2_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
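+/* Usage sketch for the FLAGS2 word above: it is only present when the
+ * firmware returns a long enough response, so callers must check the
+ * response length before reading it. The helper below is illustrative only
+ * (it is not the driver's MCDI API); it assumes "outbuf"/"outlen" describe
+ * the raw GET_CAPABILITIES_V2 response payload and that MCDI fields are
+ * little-endian.
+ *
+ *	static bool caps_v2_has_tx_tso_v2(const u8 *outbuf, size_t outlen)
+ *	{
+ *		u32 flags2;
+ *
+ *		if (outlen < MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST +
+ *			     MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_LEN)
+ *			return false;
+ *
+ *		outbuf += MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST;
+ *		flags2 = outbuf[0] | (outbuf[1] << 8) | (outbuf[2] << 16) |
+ *			 ((u32)outbuf[3] << 24);
+ *		return flags2 & (1U << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN);
+ *	}
+ *
+ * The same length check applies to every field that follows FLAGS2 in this
+ * response; older firmware simply returns a shorter payload.
+ */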
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
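+/* Usage sketch for the PFS_TO_PORTS_ASSIGNMENT array above: one byte per PF,
+ * with the special values handled before the byte is used as a port number.
+ * The helper below is illustrative only (its name and negative-errno
+ * convention are not part of these definitions); it assumes "outbuf" points
+ * at the raw GET_CAPABILITIES_V2 response payload and that "pf" has already
+ * been range-checked against PFS_TO_PORTS_ASSIGNMENT_NUM.
+ *
+ *	static int pf_to_external_port(const u8 *outbuf, unsigned int pf)
+ *	{
+ *		u8 v = outbuf[MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST +
+ *			      pf * MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN];
+ *
+ *		switch (v) {
+ *		case MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED:
+ *		case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT:
+ *			return -ENOENT;
+ *		case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED:
+ *		case MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT:
+ *			return -ENODEV;
+ *		default:
+ *			return v;
+ *		}
+ *	}
+ *
+ * INCOMPATIBLE_ASSIGNMENT is folded into the "not assigned" case here, which
+ * is how the description above says current drivers should treat it.
+ */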
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
+/* MC_CMD_GET_CAPABILITIES_V3_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V3_OUT_LEN 76
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V3_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V3_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V3_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V3_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
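+/* Usage sketch for the VI_WINDOW_MODE field (new in V3) and the descriptor
+ * cache sizes above: VI_WINDOW_MODE selects the per-VI address-space stride,
+ * and the descriptor cache sizes are stored as binary logarithms. The helpers
+ * below are illustrative only; they assume "outbuf" points at the raw
+ * GET_CAPABILITIES_V3 response payload and that the caller has already
+ * verified outlen >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN.
+ *
+ *	static unsigned int caps_v3_vi_stride(const u8 *outbuf)
+ *	{
+ *		switch (outbuf[MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_OFST]) {
+ *		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
+ *			return 8192;
+ *		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
+ *			return 16384;
+ *		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
+ *			return 65536;
+ *		default:
+ *			return 0;
+ *		}
+ *	}
+ *
+ *	static unsigned int caps_v3_rx_desc_cache_entries(const u8 *outbuf)
+ *	{
+ *		return 1U << outbuf[MC_CMD_GET_CAPABILITIES_V3_OUT_RX_DESC_CACHE_SIZE_OFST];
+ *	}
+ *
+ * A return of 0 from caps_v3_vi_stride() means an unrecognised window mode.
+ * Note that CTPIO is only mapped in the 16k and 64k window modes.
+ */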
+
+/* MC_CMD_GET_CAPABILITIES_V4_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V4_OUT_LEN 78
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V4_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
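+/*
+ * Illustrative sketch, not part of the original header: fields from FLAGS2
+ * onwards are only present when the firmware returns a long enough response,
+ * so the actual response length is checked before reading them. The helper
+ * name and the "resp"/"resp_len" arguments are assumptions for illustration.
+ */
+static inline unsigned int
+mcdi_cap_example_tso_v2_n_contexts(const unsigned char *resp,
+				   unsigned int resp_len)
+{
+	unsigned int ofst = MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_OFST;
+
+	if (resp_len < ofst + MC_CMD_GET_CAPABILITIES_V4_OUT_TX_TSO_V2_N_CONTEXTS_LEN)
+		return 0;	/* older firmware: field not present */
+	return resp[ofst] | (resp[ofst + 1] << 8);	/* little-endian 16-bit */
+}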
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but its assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
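+/*
+ * Illustrative sketch, not part of the original header: the assignment array
+ * holds one byte per PF, so the external port for PF "pf" is read directly
+ * and the special values above (0xfc..0xff) are all treated as "no usable
+ * assignment". The helper name and arguments are assumptions for illustration.
+ */
+static inline int
+mcdi_cap_example_pf_port(const unsigned char *resp, unsigned int pf)
+{
+	unsigned int val;
+
+	if (pf >= MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM)
+		return -1;
+	val = resp[MC_CMD_GET_CAPABILITIES_V4_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST + pf];
+	if (val >= MC_CMD_GET_CAPABILITIES_V4_OUT_INCOMPATIBLE_ASSIGNMENT)
+		return -1;	/* not present, not assigned, or not expressible */
+	return val;
+}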
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V4_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_TX_DESC_CACHE_SIZE_LEN 1
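+/*
+ * Illustrative sketch, not part of the original header: the cache sizes are
+ * reported as a binary logarithm, so the entry count is 1 << value. The
+ * helper name and the "resp" argument are assumptions for illustration.
+ */
+static inline unsigned int
+mcdi_cap_example_rx_desc_cache_entries(const unsigned char *resp)
+{
+	return 1u << resp[MC_CMD_GET_CAPABILITIES_V4_OUT_RX_DESC_CACHE_SIZE_OFST];
+}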
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K 0x2
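+/*
+ * Illustrative sketch, not part of the original header: mapping the reported
+ * VI_WINDOW_MODE to the per-VI address-space stride in bytes (0 for an
+ * unknown mode). The helper name and the "resp" argument are assumptions for
+ * illustration.
+ */
+static inline unsigned int
+mcdi_cap_example_vi_window_bytes(const unsigned char *resp)
+{
+	switch (resp[MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_OFST]) {
+	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_8K:
+		return 8192;	/* PIO at 4k, no CTPIO */
+	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_16K:
+		return 16384;	/* PIO at 4k, CTPIO at 12k */
+	case MC_CMD_GET_CAPABILITIES_V4_OUT_VI_WINDOW_MODE_64K:
+		return 65536;	/* PIO at 4k, CTPIO at 12k */
+	default:
+		return 0;
+	}
+}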
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_LEN 2
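+/*
+ * Illustrative sketch, not part of the original header: a MAC stats DMA
+ * buffer that can receive every statistic needs MAC_STATS_NUM_STATS 64-bit
+ * slots, i.e. MAC_STATS_NUM_STATS * 8 bytes; anything shorter is truncated.
+ * The helper name and the "resp" argument are assumptions for illustration.
+ */
+static inline unsigned int
+mcdi_cap_example_mac_stats_buf_bytes(const unsigned char *resp)
+{
+	unsigned int nstats;
+
+	nstats = resp[MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST] |
+		 (resp[MC_CMD_GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS_OFST + 1] << 8);
+	return nstats * 8;	/* one 64-bit value per stat, incl. GENERATION_END */
+}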
+
+/* MC_CMD_GET_CAPABILITIES_V5_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V5_OUT_LEN 84
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V5_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but its assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex PF-to-port mapping scheme is in use. A
+ * future driver should look for a new field supporting the new scheme; the
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V5_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V5_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V5_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_LEN 4
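+/*
+ * Illustrative sketch, not part of the original header: FILTER_ACTION_MARK_MAX
+ * is only meaningful when the FILTER_ACTION_MARK flag is set in FLAGS2, so
+ * that flag bit is checked before the 32-bit maximum is read. The helper name
+ * and the "resp" argument are assumptions for illustration.
+ */
+static inline unsigned int
+mcdi_cap_example_filter_mark_max(const unsigned char *resp)
+{
+	unsigned int ofst = MC_CMD_GET_CAPABILITIES_V5_OUT_FLAGS2_OFST;
+	unsigned int flags2;
+
+	flags2 = resp[ofst] | (resp[ofst + 1] << 8) | (resp[ofst + 2] << 16) |
+		 ((unsigned int)resp[ofst + 3] << 24);
+	if (!(flags2 & (1u << MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_LBN)))
+		return 0;
+	ofst = MC_CMD_GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX_OFST;
+	return resp[ofst] | (resp[ofst + 1] << 8) | (resp[ofst + 2] << 16) |
+	       ((unsigned int)resp[ofst + 3] << 24);
+}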
+
+/* MC_CMD_GET_CAPABILITIES_V6_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V6_OUT_LEN 148
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V6_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
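
FLAGS2 only exists on newer firmware, so a consumer has to compare the response
length against offset 20 plus the 4-byte field length before reading it. A
minimal sketch, assuming a raw little-endian response buffer rather than the
driver's MCDI helper macros:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: report whether the TX_TSO_V2 capability bit is set in FLAGS2
     * (OFST 20, LBN 0, WIDTH 1), treating a short response as "not set".
     */
    static bool has_tx_tso_v2(const uint8_t *resp, size_t outlen)
    {
            uint32_t flags2;

            if (outlen < 20 + 4)    /* FLAGS2 absent on older firmware */
                    return false;

            flags2 = (uint32_t)resp[20] | ((uint32_t)resp[21] << 8) |
                     ((uint32_t)resp[22] << 16) | ((uint32_t)resp[23] << 24);
            return (flags2 >> 0) & 1;       /* TX_TSO_V2: LBN 0, WIDTH 1 */
    }
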
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is used. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
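
Putting the special values together, a consumer of PFS_TO_PORTS_ASSIGNMENT
would treat 0xfc like PF_NOT_ASSIGNED, as the comment above requires. A rough
sketch (the function name and the printing are purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: walk the 16-entry per-PF port map at offset 26 and report the
     * assignment, handling the special values per the enum comments above.
     */
    static void dump_pf_port_assignment(const uint8_t *resp)
    {
            const uint8_t *map = resp + 26;         /* OFST 26, LEN 1, NUM 16 */

            for (unsigned int pf = 0; pf < 16; pf++) {
                    switch (map[pf]) {
                    case 0xff:
                            printf("PF%u: access not permitted\n", pf);
                            break;
                    case 0xfe:
                            printf("PF%u: not present\n", pf);
                            break;
                    case 0xfd:
                    case 0xfc:      /* INCOMPATIBLE_ASSIGNMENT -> not assigned */
                            printf("PF%u: not assigned\n", pf);
                            break;
                    default:
                            printf("PF%u: port %u\n", pf, (unsigned int)map[pf]);
                            break;
                    }
            }
    }
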
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V6_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V6_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V6_OUT_VI_WINDOW_MODE_64K 0x2
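
The three window modes above translate directly into a per-VI stride and fixed
PIO/CTPIO offsets. A hypothetical decoder, with an invented struct, might look
like:

    #include <stdint.h>

    struct vi_window {
            uint32_t stride;        /* bytes of address space per VI */
            uint32_t pio_off;       /* offset of PIO region within the window */
            uint32_t ctpio_off;     /* offset of CTPIO region, 0 = not mapped */
    };

    /* Sketch: decode the VI_WINDOW_MODE byte (OFST 72) per the enum comments
     * above.  Returns 0 on success, -1 on an unrecognised mode.
     */
    static int decode_vi_window_mode(uint8_t mode, struct vi_window *w)
    {
            switch (mode) {
            case 0x0:       /* 8k window, PIO at 4k, CTPIO not mapped */
                    *w = (struct vi_window){ 8192, 4096, 0 };
                    return 0;
            case 0x1:       /* 16k window, PIO at 4k, CTPIO at 12k */
                    *w = (struct vi_window){ 16384, 4096, 12288 };
                    return 0;
            case 0x2:       /* 64k window, PIO at 4k, CTPIO at 12k */
                    *w = (struct vi_window){ 65536, 4096, 12288 };
                    return 0;
            default:
                    return -1;
            }
    }
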
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_MAC_STATS_NUM_STATS_LEN 2
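
In other words, the minimum DMA buffer for a full stats dump is
MAC_STATS_NUM_STATS 64-bit words. A trivial sketch (the helper name is
invented):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: minimum DMA buffer size, in bytes, needed to receive the full
     * MAC stats array (one u64 per entry, including GENERATION_END).
     */
    static size_t mac_stats_buf_len(uint16_t mac_stats_num_stats)
    {
            return (size_t)mac_stats_num_stats * sizeof(uint64_t);
    }
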
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_FILTER_ACTION_MARK_MAX_LEN 4
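
A driver would therefore validate a requested mark against this limit before
building an MC_CMD_FILTER_OP_V3 request; a minimal sketch, with an invented
helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: a MATCH_MARK_VALUE is usable only if FILTER_ACTION_MARK is
     * advertised (mark_max is non-zero) and the value does not exceed the
     * advertised maximum.
     */
    static bool filter_mark_ok(uint32_t mark, uint32_t mark_max)
    {
            return mark_max != 0 && mark <= mark_max;
    }
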
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V6_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
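
A consumer checking whether a particular RX buffer size is guaranteed would
scan the non-zero entries, treating an all-zero array as "no restriction". A
rough sketch (endianness handling is omitted; the 16-entry array at offset 84
is assumed to have been copied out already):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: return true if 'want' is one of the guaranteed buffer sizes, or
     * if the list is empty (no limitation on concurrent buffer sizes).
     */
    static bool rx_buffer_size_guaranteed(const uint32_t sizes[16], uint32_t want)
    {
            bool list_empty = true;

            for (unsigned int i = 0; i < 16; i++) {
                    if (sizes[i] == 0)
                            continue;       /* unused entry */
                    list_empty = false;
                    if (sizes[i] == want)
                            return true;
            }
            return list_empty;
    }
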
+
+/* MC_CMD_GET_CAPABILITIES_V7_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V7_OUT_LEN 152
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: TX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that the PF is assigned, but the assignment
+ * cannot be expressed in this field. It is intended for a possible future
+ * situation where a more complex scheme of mapping PFs to ports is used. A
+ * future driver should look for a new field supporting the new scheme; a
+ * current/old driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V7_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V7_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford, the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V7_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V7_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V7_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
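
As with FLAGS2, FLAGS3 must be length-checked: it is only present when the
response is at least MC_CMD_GET_CAPABILITIES_V7_OUT_LEN (152) bytes. A short
sketch testing the MAE_SUPPORTED bit, again over a raw little-endian buffer
rather than the driver's real MCDI macros:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: FLAGS3 lives at offset 148; a shorter (pre-V7) response simply
     * does not carry it, so report the capability as absent in that case.
     */
    static bool has_mae_support(const uint8_t *resp, size_t outlen)
    {
            uint32_t flags3;

            if (outlen < 152)       /* FLAGS3 absent on older firmware */
                    return false;

            flags3 = (uint32_t)resp[148] | ((uint32_t)resp[149] << 8) |
                     ((uint32_t)resp[150] << 16) | ((uint32_t)resp[151] << 24);
            return (flags3 >> 3) & 1;       /* MAE_SUPPORTED: LBN 3, WIDTH 1 */
    }
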
+
+/* MC_CMD_GET_CAPABILITIES_V8_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V8_OUT_LEN 160
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V8_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V8_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V8_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V8_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LEN 8
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_LO_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V8_OUT_TEST_RESERVED_HI_OFST 156
+
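Each capability field above is described by an _OFST/_LEN pair (byte offset
and length within the response payload) and, for bit fields, an
_OFST/_LBN/_WIDTH triplet (byte offset of the containing field, bit position
within it, and bit width). The following is only an illustrative sketch of
decoding such a field from a raw little-endian MCDI response; the helper
names are hypothetical, and the in-tree driver uses its own MCDI_* accessor
macros from mcdi.h for this instead.

#include <stdint.h>

/* Hypothetical helpers: decode little-endian MCDI response fields using the
 * _OFST/_LBN/_WIDTH values defined above.
 */
static uint32_t mcdi_dword_example(const uint8_t *resp, unsigned int ofst)
{
	return (uint32_t)resp[ofst] |
	       ((uint32_t)resp[ofst + 1] << 8) |
	       ((uint32_t)resp[ofst + 2] << 16) |
	       ((uint32_t)resp[ofst + 3] << 24);
}

static uint32_t mcdi_field_example(const uint8_t *resp, unsigned int ofst,
				   unsigned int lbn, unsigned int width)
{
	/* LBN is relative to the field at OFST; fold any sub-dword part of
	 * OFST into the shift so an aligned dword can always be read.
	 */
	unsigned int shift = lbn + 8 * (ofst & 3);
	uint32_t mask = width < 32 ? (1u << width) - 1 : 0xffffffffu;

	return (mcdi_dword_example(resp, ofst & ~3u) >> shift) & mask;
}

/* e.g. testing the TX_TSO_V2 capability bit in FLAGS2:
 *	mcdi_field_example(resp,
 *			   MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_OFST,
 *			   MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_LBN,
 *			   MC_CMD_GET_CAPABILITIES_V8_OUT_TX_TSO_V2_WIDTH);
 */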
+/* MC_CMD_GET_CAPABILITIES_V9_OUT msgresponse */
+#define    MC_CMD_GET_CAPABILITIES_V9_OUT_LEN 184
+/* First word of flags. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_OFST 0
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS1_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_STRIPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_QUERY_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_QBB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_RSS_LIMITED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_BATCHING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVB_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_OFST 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_OFST 4
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: Rules engine RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_RULES_ENGINE 0x5
+/* enum: DPDK RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_DPDK 0x6
+/* enum: BIST RXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* enum: RXDP Test firmware image 10 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_TEST_FW_SLOW 0x10c
+/* TxDPCPU firmware id. */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_OFST 6
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: Rules engine TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_RULES_ENGINE 0x5
+/* enum: DPDK TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_DPDK 0x6
+/* enum: BIST TXDP firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXDP_TEST_FW_CSR 0x103
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_OFST 8
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_OFST 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK RX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_OFST 10
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_LEN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_OFST 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware for telemetry prototyping (Medford2 development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_TELEMETRY 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Full featured TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_FULL_FEATURED 0x3
+/* enum: (deprecated original name for the FULL_FEATURED variant) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: Rules engine TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_RULES_ENGINE 0x8
+/* enum: Custom firmware variant (see SF-119495-PD and bug69716) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_L3XUDP 0x9
+/* enum: DPDK TX PD production firmware */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_DPDK 0xa
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_OFST 12
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_HW_CAPABILITIES_LEN 4
+/* Licensed capabilities */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_OFST 16
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_LICENSE_CAPABILITIES_LEN 4
+/* Second word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_OFST 20
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS2_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_CUT_THROUGH_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_VFIFO_ULL_MODE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_40G_TX_SIZE_BINS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_TYPE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_EVQ_V2_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_LBN 8
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_MAC_TIMESTAMPING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_LBN 9
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TIMESTAMP_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_LBN 10
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_LBN 11
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_SNIFF_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_LBN 12
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_REPORT_VERIFY_RESULT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_LBN 13
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_BACKGROUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_LBN 14
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MCDI_DB_RETURN_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_LBN 15
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_CTPIO_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_LBN 16
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_LBN 17
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TSA_BOUND_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_LBN 18
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_SF_ADAPTER_AUTHENTICATION_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_LBN 19
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_FLAG_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_LBN 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_SUPER_BUFFER_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_LBN 21
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EQUAL_STRIDE_PACKED_STREAM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_LBN 22
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_L3XUDP_SUPPORT_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_LBN 23
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_FW_SUBVARIANT_NO_TX_CSUM_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_LBN 24
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VI_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_LBN 25
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RXDP_HLB_IDLE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_LBN 26
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_NO_CONT_EV_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_LBN 27
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_INIT_RXQ_WITH_BUFFER_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_LBN 28
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_BUNDLE_UPDATE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_LBN 29
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V3_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_LBN 30
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_DYNAMIC_SENSORS_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_OFST 20
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_LBN 31
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_NVRAM_UPDATE_POLL_VERIFY_RESULT_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC (when
+ * TX_TSO_V2 == 1). Not present on older firmware (check the length).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_OFST 42
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_LEN 1
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/*               MC_CMD_GET_CAPABILITIES_V9_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/*               MC_CMD_GET_CAPABILITIES_V9_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_OFST 58
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_LEN 2
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ RX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual
+ * size equals (2 ^ TX_DESC_CACHE_SIZE).
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_OFST 68
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_OFST 70
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_SIZE_PIO_BUFF_LEN 2
+/* On chips later than Medford the amount of address space assigned to each VI
+ * is configurable. This is a global setting that the driver must query to
+ * discover the VI to address mapping. Cut-through PIO (CTPIO) is not available
+ * with 8k VI windows.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_OFST 72
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_LEN 1
+/* enum: Each VI occupies 8k as on Huntington and Medford. PIO is at offset 4k.
+ * CTPIO is not mapped.
+ */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_8K 0x0
+/* enum: Each VI occupies 16k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_16K 0x1
+/* enum: Each VI occupies 64k. PIO is at offset 4k. CTPIO is at offset 12k. */
+#define          MC_CMD_GET_CAPABILITIES_V9_OUT_VI_WINDOW_MODE_64K 0x2
+/* Number of vFIFOs per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_OFST 73
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_VFIFOS_LEN 1
+/* Number of buffers per adapter that can be used for VFIFO Stuffing
+ * (SF-115995-SW) in the present configuration of firmware and port mode.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_OFST 74
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_VFIFO_STUFFING_NUM_CP_BUFFERS_LEN 2
+/* Entry count in the MAC stats array, including the final GENERATION_END
+ * entry. For MAC stats DMA, drivers should allocate a buffer large enough to
+ * hold at least this many 64-bit stats values, if they wish to receive all
+ * available stats. If the buffer is shorter than MAC_STATS_NUM_STATS * 8, the
+ * stats array returned will be truncated.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_OFST 76
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_MAC_STATS_NUM_STATS_LEN 2
+/* Maximum supported value for MC_CMD_FILTER_OP_V3/MATCH_MARK_VALUE. This field
+ * will only be non-zero if MC_CMD_GET_CAPABILITIES/FILTER_ACTION_MARK is set.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_OFST 80
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FILTER_ACTION_MARK_MAX_LEN 4
+/* On devices where the INIT_RXQ_WITH_BUFFER_SIZE flag (in
+ * GET_CAPABILITIES_OUT_V2) is set, drivers have to specify a buffer size when
+ * they create an RX queue. Due to hardware limitations, only a small number of
+ * different buffer sizes may be available concurrently. Nonzero entries in
+ * this array are the sizes of buffers which the system guarantees will be
+ * available for use. If the list is empty, there are no limitations on
+ * concurrent buffer sizes.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_OFST 84
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_LEN 4
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_GUARANTEED_RX_BUFFER_SIZES_NUM 16
+/* Third word of flags. Not present on older firmware (check the length). */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_OFST 148
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_FLAGS3_LEN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_LBN 0
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_WOL_ETHERWAKE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_LBN 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_EVEN_SPREADING_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_LBN 2
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_SELECTABLE_TABLE_SIZE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_LBN 3
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_MAE_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_LBN 4
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_VDPA_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_LBN 5
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_RX_VLAN_STRIPPING_PER_ENCAP_RULE_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_LBN 6
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_EXTENDED_WIDTH_EVQS_SUPPORTED_WIDTH 1
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_OFST 148
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_LBN 7
+#define        MC_CMD_GET_CAPABILITIES_V9_OUT_UNSOL_EV_CREDIT_SUPPORTED_WIDTH 1
+/* These bits are reserved for communicating test-specific capabilities to
+ * host-side test software. All production drivers should treat this field as
+ * opaque.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LEN 8
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_LO_OFST 152
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_TEST_RESERVED_HI_OFST 156
+/* The minimum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_OFST 160
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MIN_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum size (in table entries) of indirection table to be allocated
+ * from the pool for an RSS context. Note that the table size used must be a
+ * power of 2.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_OFST 164
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_TABLE_SIZE_LEN 4
+/* The maximum number of queues that can be used by an RSS context in exclusive
+ * mode. In exclusive mode the context has a configurable indirection table and
+ * a configurable RSS key.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_OFST 168
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_INDIRECTION_QUEUES_LEN 4
+/* The maximum number of queues that can be used by an RSS context in even-
+ * spreading mode. In even-spreading mode the context has no indirection table
+ * but it does have a configurable RSS key.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_OFST 172
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_MAX_EVEN_SPREADING_QUEUES_LEN 4
+/* The total number of RSS contexts supported. Note that the number of
+ * available contexts using indirection tables is also limited by the
+ * availability of indirection table space allocated from a common pool.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_OFST 176
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_NUM_CONTEXTS_LEN 4
+/* The total amount of indirection table space that can be shared between RSS
+ * contexts.
+ */
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST 180
+#define       MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_LEN 4
+
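The V9 response appends the RSS sizing fields at offsets 160..183 after the
fields it shares with V8, and, as the "check the length" comments above note,
such trailing fields are absent on older firmware. A minimal sketch of
guarding such a read, reusing the hypothetical mcdi_dword_example() helper
from the earlier sketch:

static int rss_table_pool_size_example(const uint8_t *resp,
				       unsigned int resp_len,
				       uint32_t *pool_size)
{
	/* Older firmware returns a shorter GET_CAPABILITIES response that
	 * ends before the V9-only RSS fields.
	 */
	if (resp_len < MC_CMD_GET_CAPABILITIES_V9_OUT_LEN)
		return -1;

	*pool_size = mcdi_dword_example(resp,
			MC_CMD_GET_CAPABILITIES_V9_OUT_RSS_TABLE_POOL_SIZE_OFST);
	return 0;
}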
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define    MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define       MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define       MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define       MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define       MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define       MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define       MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 2
+/* Type of command/response */
+#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN 28
+#define       MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_WIDTH 4
+/* enum: MCDI command directed to or response originating from the MC. */
+#define          MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC 0x0
+/* enum: MCDI command directed to a TSA controller. MCDI responses of this type
+ * are not defined.
+ */
+#define          MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_TSA 0x1
+
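The MC_CMD_V2_EXTN_IN header is a single dword whose sub-fields are defined
only by LBN/WIDTH pairs, so it is assembled by shifting each value into
place. An illustrative sketch (the function name is hypothetical, and each
argument must already fit within its _WIDTH bits):

static uint32_t mcdi_v2_extn_hdr_example(uint32_t ext_cmd,
					 uint32_t actual_len,
					 uint32_t msg_type)
{
	return (ext_cmd << MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN) |
	       (actual_len << MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN) |
	       (msg_type << MC_CMD_V2_EXTN_IN_MESSAGE_TYPE_LBN);
}

A command directed at the MC itself would pass
MC_CMD_V2_EXTN_IN_MCDI_MESSAGE_TYPE_MC as msg_type.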
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+#undef MC_CMD_0xb2_PRIVILEGE_CTG
+
+#define MC_CMD_0xb2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define    MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+#undef MC_CMD_0xb3_PRIVILEGE_CTG
+
+#define MC_CMD_0xb3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_LEN 4
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define    MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+#undef MC_CMD_0xb4_PRIVILEGE_CTG
+
+#define MC_CMD_0xb4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_LEN 4
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_IN_RATE_LEN 4
+
+/* MC_CMD_TCM_BUCKET_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_BUCKET_INIT_EXT_IN_LEN 12
+/* the bucket id */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_OFST 0
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_BUCKET_LEN 4
+/* the rate in mbps */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_OFST 4
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_RATE_LEN 4
+/* the desired maximum fill level */
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_OFST 8
+#define       MC_CMD_TCM_BUCKET_INIT_EXT_IN_MAX_FILL_LEN 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define    MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise txq in pacer with given options or set options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+#undef MC_CMD_0xb5_PRIVILEGE_CTG
+
+#define MC_CMD_0xb5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_IN_QID_LEN 4
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_IN_LABEL_LEN 4
+/* bitmask of the priority queues into which this txq is inserted. */
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_LEN 4
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_LEN 4
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_LEN 4
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_LEN 4
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_LEN 4
+
+/* MC_CMD_TCM_TXQ_INIT_EXT_IN msgrequest */
+#define    MC_CMD_TCM_TXQ_INIT_EXT_IN_LEN 32
+/* the txq id */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_OFST 0
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_QID_LEN 4
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_OFST 4
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_NORMAL_LEN 4
+/* bitmask of the priority queues into which this txq is inserted. */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_OFST 8
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAGS_LEN 4
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_LBN 0
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_GUARANTEED_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_LBN 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_NORMAL_WIDTH 1
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_OFST 8
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_LBN 2
+#define        MC_CMD_TCM_TXQ_INIT_EXT_IN_PQ_FLAG_LOW_WIDTH 1
+/* the reaction point (RP) bucket */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_OFST 12
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_RP_BKT_LEN 4
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_OFST 16
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT1_LEN 4
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_OFST 20
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MAX_BKT2_LEN 4
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_OFST 24
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_MIN_BKT_LEN 4
+/* the static priority associated with the txq */
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_OFST 28
+#define       MC_CMD_TCM_TXQ_INIT_EXT_IN_LABEL_GUARANTEED_LEN 4
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define    MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+#undef MC_CMD_0x92_PRIVILEGE_CTG
+
+#define MC_CMD_0x92_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define    MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+#define       MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_LEN 4
+/* Function Local Instance (VI) number. */
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+#define       MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define    MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+#undef MC_CMD_0x93_PRIVILEGE_CTG
+
+#define MC_CMD_0x93_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define    MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+#define       MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_LEN 4
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define    MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+#undef MC_CMD_0x94_PRIVILEGE_CTG
+
+#define MC_CMD_0x94_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define    MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of v-switch to create. */
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VSWITCH_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* enum: MUX */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_MUX 0x4
+/* enum: Snapper specific; semantics TBD */
+#define          MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_TEST 0x5
+/* Flags controlling v-port creation */
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VSWITCH_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define        MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to allow for attached v-ports. For VLAN aggregators,
+ * this must be one or greater, and the attached v-ports must have exactly this
+ * number of tags. For other v-switch types, this must be zero or greater, and
+ * is an upper limit on the number of VLAN tags for attached v-ports. An error
+ * will be returned if existing configuration means we can't support attached
+ * v-ports with this number of tags.
+ */
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define    MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
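+
+/* Usage sketch (illustrative only): allocating a VEB v-switch on an EVB
+ * port. Assumes the MCDI helpers and efx_mcdi_rpc() from this driver's
+ * mcdi.h; "efx" and "port_id" are hypothetical caller-provided names.
+ * Fields not written explicitly (FLAGS) stay at the buffer's zero
+ * initialisation.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VSWITCH_ALLOC_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_TYPE,
+ *		       MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB);
+ *	MCDI_SET_DWORD(inbuf, VSWITCH_ALLOC_IN_NUM_VLAN_TAGS, 0);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VSWITCH_ALLOC, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */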
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+#undef MC_CMD_0x95_PRIVILEGE_CTG
+
+#define MC_CMD_0x95_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define    MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define    MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+#undef MC_CMD_0x63_PRIVILEGE_CTG
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define    MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define    MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+#undef MC_CMD_0x96_PRIVILEGE_CTG
+
+#define MC_CMD_0x96_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define    MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of the new v-port. */
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_VPORT_ALLOC_IN_TYPE_LEN 4
+/* enum: VLAN (obsolete) */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives packets which do not match any other
+ * v-port.
+ */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define          MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VPORT_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_OFST 8
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_OFST 8
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define        MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define       MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_OFST 16
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_OFST 16
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define        MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define    MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_LEN 4
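+
+/* Usage sketch (illustrative only): allocating a NORMAL v-port carrying one
+ * VLAN tag and retrieving its handle. Assumes the MCDI helpers and
+ * efx_mcdi_rpc() from this driver's mcdi.h; "efx", "port_id", "vlan" and
+ * "vport_id" are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_TYPE,
+ *		       MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL);
+ *	MCDI_SET_DWORD(inbuf, VPORT_ALLOC_IN_NUM_VLAN_TAGS, 1);
+ *	MCDI_POPULATE_DWORD_1(inbuf, VPORT_ALLOC_IN_VLAN_TAGS,
+ *			      VPORT_ALLOC_IN_VLAN_TAG_0, vlan);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_VPORT_ALLOC_OUT_LEN)
+ *		vport_id = MCDI_DWORD(outbuf, VPORT_ALLOC_OUT_VPORT_ID);
+ */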
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+#undef MC_CMD_0x97_PRIVILEGE_CTG
+
+#define MC_CMD_0x97_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define    MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_FREE_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define    MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+#undef MC_CMD_0x98_PRIVILEGE_CTG
+
+#define MC_CMD_0x98_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_ALLOC_IN_LEN 30
+/* The port to connect to the v-adaptor's port. */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Flags controlling v-adaptor creation */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define       MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_LEN 4
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_OFST 8
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_OFST 8
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define        MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_LEN 4
+/* The number of VLAN tags to transparently insert/remove. */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_OFST 16
+#define       MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_OFST 20
+#define       MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_OFST 20
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_OFST 20
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define        MC_CMD_VADAPTOR_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+/* The MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_OFST 24
+#define       MC_CMD_VADAPTOR_ALLOC_IN_MACADDR_LEN 6
+/* enum: Derive the MAC address from the upstream port */
+#define          MC_CMD_VADAPTOR_ALLOC_IN_AUTO_MAC 0x0
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+#undef MC_CMD_0x99_PRIVILEGE_CTG
+
+#define MC_CMD_0x99_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define    MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_SET_MAC
+ * assign a new MAC address to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_SET_MAC 0x5d
+#undef MC_CMD_0x5d_PRIVILEGE_CTG
+
+#define MC_CMD_0x5d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_SET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_SET_MAC_IN_LEN 10
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The new MAC address to assign to this v-adaptor */
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_OFST 4
+#define       MC_CMD_VADAPTOR_SET_MAC_IN_MACADDR_LEN 6
+
+/* MC_CMD_VADAPTOR_SET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_SET_MAC_OUT_LEN 0
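+
+/* Usage sketch (illustrative only): writing a new MAC address. The MACADDR
+ * field is a 6-byte array, so it is filled through MCDI_PTR() rather than a
+ * dword setter. Assumes <linux/etherdevice.h> for ether_addr_copy() and the
+ * MCDI helpers and efx_mcdi_rpc() from this driver's mcdi.h; "efx",
+ * "port_id" and "mac" are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID, port_id);
+ *	ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR), mac);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */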
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_GET_MAC
+ * read the MAC address assigned to a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_GET_MAC 0x5e
+#undef MC_CMD_0x5e_PRIVILEGE_CTG
+
+#define MC_CMD_0x5e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_GET_MAC_IN msgrequest */
+#define    MC_CMD_VADAPTOR_GET_MAC_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_GET_MAC_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_GET_MAC_OUT_LEN 6
+/* The MAC address assigned to this v-adaptor */
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_OFST 0
+#define       MC_CMD_VADAPTOR_GET_MAC_OUT_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+#undef MC_CMD_0x61_PRIVILEGE_CTG
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define    MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define    MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+#define       MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_LEN 4
+/* The number of VLAN tags that may still be added */
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+#define       MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+#undef MC_CMD_0x9a_PRIVILEGE_CTG
+
+#define MC_CMD_0x9a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define    MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_LEN 4
+/* The target function to modify. */
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define       MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_LEN 4
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_OFST 4
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_OFST 4
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define        MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define    MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
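+
+/* Usage sketch (illustrative only): the FUNCTION field above packs the PF
+ * index into its low 16 bits and the VF index into its high 16 bits, so both
+ * sub-fields can be written together. Assumes the MCDI helpers and
+ * efx_mcdi_rpc() from this driver's mcdi.h; "efx", "port_id", "pf" and "vf"
+ * are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_EVB_PORT_ASSIGN_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, EVB_PORT_ASSIGN_IN_PORT_ID, port_id);
+ *	MCDI_POPULATE_DWORD_2(inbuf, EVB_PORT_ASSIGN_IN_FUNCTION,
+ *			      EVB_PORT_ASSIGN_IN_PF, pf,
+ *			      EVB_PORT_ASSIGN_IN_VF, vf);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_EVB_PORT_ASSIGN, inbuf, sizeof(inbuf),
+ *			  NULL, 0, NULL);
+ */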
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+#undef MC_CMD_0x9b_PRIVILEGE_CTG
+
+#define MC_CMD_0x9b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define    MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION0_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION1_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION2_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_IN_REGION3_LEN 4
+/* Write enable bits 0-3, set to write, clear to read. */
+#define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define       MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included
+ * regardless of the state of the write bits in the request.
+ */
+#define    MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_LEN 4
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
+#define       MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+#undef MC_CMD_0x9c_PRIVILEGE_CTG
+
+#define MC_CMD_0x9c_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define    MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define    MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+#undef MC_CMD_0x9d_PRIVILEGE_CTG
+
+#define MC_CMD_0x9d_PRIVILEGE_CTG SRIOV_CTG_ONLOAD
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define    MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+#define       MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_LEN 4
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define    MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+#undef MC_CMD_0x9e_PRIVILEGE_CTG
+
+#define MC_CMD_0x9e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_V2_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_LEN 16
+/* The handle of the owning upstream port */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_UPSTREAM_PORT_ID_LEN 4
+/* The type of context to allocate */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_LEN 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_SHARED 0x1
+/* enum: Allocate a context to spread evenly across an arbitrary number of
+ * queues. No indirection table space is allocated for this context. (EF100 and
+ * later)
+ */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_TYPE_EVEN_SPREADING 0x2
+/* Number of queues spanned by this context. For exclusive contexts this must
+ * be in the range 1 to RSS_MAX_INDIRECTION_QUEUES, where
+ * RSS_MAX_INDIRECTION_QUEUES is queried from MC_CMD_GET_CAPABILITIES_V9 or if
+ * V9 is not supported then RSS_MAX_INDIRECTION_QUEUES is 64. Valid entries in
+ * the indirection table will be in the range 0 to NUM_QUEUES-1. For even-
+ * spreading contexts this must be in the range 1 to
+ * RSS_MAX_EVEN_SPREADING_QUEUES as queried from MC_CMD_GET_CAPABILITIES. Note
+ * that specifying NUM_QUEUES = 1 will not perform any spreading but may still
+ * be useful as a way of obtaining the Toeplitz hash.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_OFST 8
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_NUM_QUEUES_LEN 4
+/* Size of indirection table to be allocated to this context from the pool.
+ * Must be a power of 2. The minimum and maximum table size can be queried
+ * using MC_CMD_GET_CAPABILITIES_V9. If there is not enough space remaining in
+ * the common pool to allocate the requested table size, due to allocating
+ * table space to other RSS contexts, then the command will fail with
+ * MC_CMD_ERR_ENOSPC.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_OFST 12
+#define       MC_CMD_RSS_CONTEXT_ALLOC_V2_IN_INDIRECTION_TABLE_SIZE_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_LEN 4
+/* enum: guaranteed invalid RSS context handle value */
+#define          MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID 0xffffffff
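+
+/* Usage sketch (illustrative only): allocating an EXCLUSIVE context spanning
+ * "n_queues" RX queues and checking the returned handle against the INVALID
+ * marker. Assumes the MCDI helpers and efx_mcdi_rpc() from this driver's
+ * mcdi.h; "efx", "port_id", "n_queues" and "ctx_id" are hypothetical
+ * caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ *		       MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, n_queues);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ *			  outbuf, sizeof(outbuf), &outlen);
+ *	if (!rc && outlen >= MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN) {
+ *		ctx_id = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+ *		if (ctx_id == MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_INVALID)
+ *			rc = -EIO;
+ *	}
+ */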
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+#undef MC_CMD_0x9f_PRIVILEGE_CTG
+
+#define MC_CMD_0x9f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+#undef MC_CMD_0xa0_PRIVILEGE_CTG
+
+#define MC_CMD_0xa0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+#undef MC_CMD_0xa1_PRIVILEGE_CTG
+
+#define MC_CMD_0xa1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+#undef MC_CMD_0xa2_PRIVILEGE_CTG
+
+#define MC_CMD_0xa2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* The 128-byte indirection table (1 byte per entry) */
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
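+
+/* Usage sketch (illustrative only): programming the fixed 128-entry table by
+ * spreading queues 0..n_queues-1 round-robin. Each entry is one byte, so the
+ * table is filled through MCDI_PTR(). Assumes the MCDI helpers and
+ * efx_mcdi_rpc() from this driver's mcdi.h; "efx", "ctx_id" and "n_queues"
+ * are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ *	u8 *table = MCDI_PTR(inbuf, RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE);
+ *	unsigned int i;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID, ctx_id);
+ *	for (i = 0; i < MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN; i++)
+ *		table[i] = i % n_queues;
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */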
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context. This command should only be
+ * used with indirection tables containing 128 entries, which is the default
+ * when the RSS context is allocated without specifying a table size.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+#undef MC_CMD_0xa3_PRIVILEGE_CTG
+
+#define MC_CMD_0xa3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE
+ * Write a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_SET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_WRITE_TABLE 0x13e
+#undef MC_CMD_0x13e_PRIVILEGE_CTG
+
+#define MC_CMD_0x13e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMIN 8
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array of index-value pairs to be written to the table. Structure is
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY.
+ */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_OFST 4
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_LEN 4
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM 62
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM_MCDI2 254
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_OUT_LEN 0
+
+/* MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY structuredef */
+#define    MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_LEN 4
+/* The index of the table entry to be written. */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_OFST 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LEN 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_LBN 0
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_INDEX_WIDTH 16
+/* The value to write into the table entry. */
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_OFST 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LEN 2
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN 16
+#define       MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_WIDTH 16
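+
+/* Usage sketch (illustrative only): writing "n" individual table entries,
+ * packing each MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY as a little-endian
+ * 32-bit word with the index in the low 16 bits and the value in the high 16
+ * bits, per the _LBN/_WIDTH values above. "n" must not exceed
+ * MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM per request. Assumes the
+ * MCDI helpers and efx_mcdi_rpc() from this driver's mcdi.h; "efx", "ctx_id",
+ * "n" and "n_queues" are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(
+ *			 MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES_MAXNUM));
+ *	__le32 *entries = (__le32 *)MCDI_PTR(inbuf,
+ *				RSS_CONTEXT_WRITE_TABLE_IN_ENTRIES);
+ *	unsigned int i;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_WRITE_TABLE_IN_RSS_CONTEXT_ID, ctx_id);
+ *	for (i = 0; i < n; i++)
+ *		entries[i] = cpu_to_le32(i |
+ *			(i % n_queues) << MC_CMD_RSS_CONTEXT_WRITE_TABLE_ENTRY_VALUE_LBN);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_WRITE_TABLE, inbuf,
+ *			  MC_CMD_RSS_CONTEXT_WRITE_TABLE_IN_LEN(n),
+ *			  NULL, 0, NULL);
+ */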
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_READ_TABLE
+ * Read a portion of a selectable-size indirection table for an RSS context.
+ * This command must be used instead of MC_CMD_RSS_CONTEXT_GET_TABLE if the
+ * RSS_SELECTABLE_TABLE_SIZE bit is set in MC_CMD_GET_CAPABILITIES.
+ */
+#define MC_CMD_RSS_CONTEXT_READ_TABLE 0x13f
+#undef MC_CMD_0x13f_PRIVILEGE_CTG
+
+#define MC_CMD_0x13f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMIN 6
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_LEN(num) (4+2*(num))
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_NUM(len) (((len)-4)/2)
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_RSS_CONTEXT_ID_LEN 4
+/* An array containing the indices of the entries to be read. */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_OFST 4
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_LEN 2
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM 124
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_IN_INDICES_MAXNUM_MCDI2 508
+
+/* MC_CMD_RSS_CONTEXT_READ_TABLE_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMIN 2
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX 252
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_NUM(len) (((len)-0)/2)
+/* A buffer containing the requested entries read from the table. */
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_OFST 0
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_LEN 2
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MINNUM 1
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM 126
+#define       MC_CMD_RSS_CONTEXT_READ_TABLE_OUT_DATA_MAXNUM_MCDI2 510
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+#undef MC_CMD_0xe1_PRIVILEGE_CTG
+
+#define MC_CMD_0xe1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
+ */
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_LEN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
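+
+/* Usage sketch (illustrative only): enabling Toeplitz hashing on IPv4/IPv6
+ * addresses and TCP ports using only the legacy _EN bits, leaving all _MODE
+ * fields at zero so that firmware without ADDITIONAL_RSS_MODES accepts the
+ * request, as described above. Assumes the MCDI helpers and efx_mcdi_rpc()
+ * from this driver's mcdi.h; "efx" and "ctx_id" are hypothetical
+ * caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID, ctx_id);
+ *	MCDI_POPULATE_DWORD_4(inbuf, RSS_CONTEXT_SET_FLAGS_IN_FLAGS,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN, 1,
+ *			      RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN, 1);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_FLAGS, inbuf,
+ *			  sizeof(inbuf), NULL, 0, NULL);
+ */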
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+#undef MC_CMD_0xe2_PRIVILEGE_CTG
+
+#define MC_CMD_0xe2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define    MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_LEN 4
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define    MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
+ */
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define       MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_LEN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_LBN 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_LBN 8
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_LBN 12
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_LBN 16
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV4_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_LBN 20
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TCP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN 24
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_WIDTH 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_OFST 4
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN 28
+#define        MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+#undef MC_CMD_0xa4_PRIVILEGE_CTG
+
+#define MC_CMD_0xa4_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define    MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_LEN 4
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define    MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping. This should be considered opaque to the
+ * host, although a value of 0xFFFFFFFF is guaranteed never to be a valid
+ * handle.
+ */
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_LEN 4
+/* enum: guaranteed invalid .1p mapping handle value */
+#define          MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_INVALID 0xffffffff
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+#undef MC_CMD_0xa5_PRIVILEGE_CTG
+
+#define MC_CMD_0xa5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define    MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define    MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+#undef MC_CMD_0xa6_PRIVILEGE_CTG
+
+#define MC_CMD_0xa6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define    MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define    MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+#undef MC_CMD_0xa7_PRIVILEGE_CTG
+
+#define MC_CMD_0xa7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define    MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_LEN 4
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define    MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define       MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+#undef MC_CMD_0xbf_PRIVILEGE_CTG
+
+#define MC_CMD_0xbf_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define    MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define    MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_LEN 4
+/* Number of interrupt vectors allocated to this PF. */
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+#define       MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+#undef MC_CMD_0xc0_PRIVILEGE_CTG
+
+#define MC_CMD_0xc0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define    MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+#define       MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_LEN 4
+/* Number of interrupt vectors to allocate to this PF. */
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_LEN 4
+/* Number of interrupt vectors to allocate per VF. */
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+#define       MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_LEN 4
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define    MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+#undef MC_CMD_0xa8_PRIVILEGE_CTG
+
+#define MC_CMD_0xa8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to add */
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define       MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define    MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+#undef MC_CMD_0xa9_PRIVILEGE_CTG
+
+#define MC_CMD_0xa9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define    MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_LEN 4
+/* MAC address to delete */
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define       MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define    MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+#undef MC_CMD_0xaa_PRIVILEGE_CTG
+
+#define MC_CMD_0xaa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_LEN 4
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX_MCDI2 1018
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+#define    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(len) (((len)-4)/6)
+/* The number of MAC addresses returned */
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_LEN 4
+/* Array of MAC addresses */
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
+#define       MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM_MCDI2 169
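+
+/* Usage sketch (illustrative only): walking the variable-length response.
+ * A robust caller would also clamp COUNT against
+ * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_NUM(outlen). Assumes the MCDI
+ * helpers and efx_mcdi_rpc() from this driver's mcdi.h; "efx" and "vport_id"
+ * are hypothetical caller-provided names.
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
+ *	size_t outlen;
+ *	unsigned int i, count;
+ *	int rc;
+ *
+ *	MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, vport_id);
+ *	rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
+ *			  sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *	if (rc)
+ *		return rc;
+ *	count = MCDI_DWORD(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
+ *	for (i = 0; i < count; i++) {
+ *		const u8 *mac = MCDI_PTR(outbuf,
+ *				VPORT_GET_MAC_ADDRESSES_OUT_MACADDR) +
+ *			i * MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN;
+ *
+ *		netif_dbg(efx, drv, efx->net_dev, "v-port MAC %pM\n", mac);
+ *	}
+ */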
+
+
+/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+#undef MC_CMD_0xeb_PRIVILEGE_CTG
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define    MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_LEN 4
+/* Flags requesting what should be changed. */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define       MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_OFST 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_OFST 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define        MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_LEN 4
+/* The actual VLAN tags to insert/remove */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define       MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_OFST 12
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_OFST 12
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define        MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+#define       MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_LEN 4
+/* MAC addresses to add */
+#define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define       MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define    MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define       MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_LEN 4
+#define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_OFST 0
+#define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define        MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
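+/* Illustrative sketch, not generated content: the FLAGS word is built from
+ * the _LBN bit positions above, and RESET_DONE is checked in the response
+ * the same way. The MCDI helpers, efx_mcdi_rpc() and ether_addr_copy() are
+ * assumed to be the driver's/kernel's usual ones; efx, vport_id and mac are
+ * placeholders.
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_RECONFIGURE_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_RECONFIGURE_OUT_LEN);
+ *    bool reset_done;
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_VPORT_ID, vport_id);
+ *    MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_FLAGS,
+ *                   1 << MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN);
+ *    MCDI_SET_DWORD(inbuf, VPORT_RECONFIGURE_IN_NUM_MACADDRS, 1);
+ *    ether_addr_copy(MCDI_PTR(inbuf, VPORT_RECONFIGURE_IN_MACADDRS), mac);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_RECONFIGURE, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (rc == 0)
+ *        reset_done = MCDI_DWORD(outbuf, VPORT_RECONFIGURE_OUT_FLAGS) &
+ *                     (1 << MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN);
+ */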
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * Read some configuration of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+#undef MC_CMD_0x62_PRIVILEGE_CTG
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define    MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_LEN 4
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define    MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+#define       MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_LEN 4
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+#define       MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries and does not use chunk handles. All entries must be in
+ * range and used for queue page mapping, although the latter restriction
+ * may be lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+#undef MC_CMD_0xab_PRIVILEGE_CTG
+
+#define MC_CMD_0xab_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_LEN 4
+/* Number of buffer table entries to dump. */
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_LEN 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+#define    MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(len) (((len)-0)/12)
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+#define       MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM_MCDI2 85
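+/* Illustrative sketch, not part of the generated file: reading a window of
+ * buffer table entries. The variable-length response buffer is sized with
+ * _OUT_LENMAX and the number of entries actually returned is recovered with
+ * _OUT_ENTRY_NUM(). MCDI helpers are assumed from the driver's mcdi.h;
+ * efx and first_id are placeholders.
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX);
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, DUMP_BUFTBL_ENTRIES_IN_FIRSTID, first_id);
+ *    MCDI_SET_DWORD(inbuf, DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES,
+ *                   MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_DUMP_BUFTBL_ENTRIES, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (rc == 0) {
+ *        unsigned int n = MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_NUM(outlen);
+ *        const u8 *entries = MCDI_PTR(outbuf, DUMP_BUFTBL_ENTRIES_OUT_ENTRY);
+ *    }
+ *
+ * Each of the 'n' entries at 'entries' is a 12-byte raw BUFTBL_ENTRY.
+ */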
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+#undef MC_CMD_0xc1_PRIVILEGE_CTG
+
+#define MC_CMD_0xc1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define       MC_CMD_SET_RXDP_CONFIG_IN_DATA_LEN 4
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_OFST 0
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_OFST 0
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define        MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define          MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define          MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define          MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+#undef MC_CMD_0xc2_PRIVILEGE_CTG
+
+#define MC_CMD_0xc2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define       MC_CMD_GET_RXDP_CONFIG_OUT_DATA_LEN 4
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_OFST 0
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_OFST 0
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define        MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/*             Enum values, see field(s): */
+/*                MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
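+/* Illustrative sketch, not generated content: extracting the PAD_HOST_LEN
+ * sub-field from the returned DATA word using its _LBN/_WIDTH defines. The
+ * MCDI helpers and efx_mcdi_rpc() are assumed from the driver's MCDI layer;
+ * efx is a placeholder.
+ *
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
+ *    unsigned int pad_host_len;
+ *    u32 data;
+ *    int rc;
+ *
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_GET_RXDP_CONFIG, NULL, 0,
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (rc == 0) {
+ *        data = MCDI_DWORD(outbuf, GET_RXDP_CONFIG_OUT_DATA);
+ *        pad_host_len =
+ *            (data >> MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN) &
+ *            ((1 << MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH) - 1);
+ *    }
+ *
+ * pad_host_len then takes one of the PAD_HOST_64/128/256 enum values listed
+ * for MC_CMD_SET_RXDP_CONFIG_IN above.
+ */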
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+#undef MC_CMD_0xac_PRIVILEGE_CTG
+
+#define MC_CMD_0xac_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define    MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define    MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_GET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* DPCPU frequency, MHz */
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+#define       MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_LEN 4
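+/* Illustrative sketch, not part of the generated header: the simplest
+ * fixed-length request/response pattern, shown for MC_CMD_GET_CLOCK. The
+ * MCDI helpers and efx_mcdi_rpc() are assumed from the driver's MCDI layer;
+ * efx is a placeholder.
+ *
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ *    unsigned int sys_mhz, dpcpu_mhz;
+ *    int rc;
+ *
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (rc == 0) {
+ *        sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ *        dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
+ *    }
+ */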
+
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+#undef MC_CMD_0xad_PRIVILEGE_CTG
+
+#define MC_CMD_0xad_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define    MC_CMD_SET_CLOCK_IN_LEN 28
+/* Requested frequency in MHz for system clock domain */
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_IN_SYS_FREQ_LEN 4
+/* enum: Leave the system clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_SYS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for inter-core clock domain */
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_IN_ICORE_FREQ_LEN 4
+/* enum: Leave the inter-core clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_ICORE_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for DPCPU clock domain */
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_LEN 4
+/* enum: Leave the DPCPU clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_DPCPU_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for PCS clock domain */
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_IN_PCS_FREQ_LEN 4
+/* enum: Leave the PCS clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_PCS_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for MC clock domain */
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_IN_MC_FREQ_LEN 4
+/* enum: Leave the MC clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_MC_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for rmon clock domain */
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_IN_RMON_FREQ_LEN 4
+/* enum: Leave the rmon clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_RMON_DOMAIN_DONT_CHANGE 0x0
+/* Requested frequency in MHz for vswitch clock domain */
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_IN_VSWITCH_FREQ_LEN 4
+/* enum: Leave the vswitch clock domain frequency unchanged */
+#define          MC_CMD_SET_CLOCK_IN_VSWITCH_DOMAIN_DONT_CHANGE 0x0
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define    MC_CMD_SET_CLOCK_OUT_LEN 28
+/* Resulting system frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+#define       MC_CMD_SET_CLOCK_OUT_SYS_FREQ_LEN 4
+/* enum: The system clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_SYS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting inter-core frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+#define       MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_LEN 4
+/* enum: The inter-core clock domain doesn't exist / isn't used */
+#define          MC_CMD_SET_CLOCK_OUT_ICORE_DOMAIN_UNSUPPORTED 0x0
+/* Resulting DPCPU frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+#define       MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_LEN 4
+/* enum: The dpcpu clock domain doesn't exist */
+#define          MC_CMD_SET_CLOCK_OUT_DPCPU_DOMAIN_UNSUPPORTED 0x0
+/* Resulting PCS frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_OFST 12
+#define       MC_CMD_SET_CLOCK_OUT_PCS_FREQ_LEN 4
+/* enum: The PCS clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_PCS_DOMAIN_UNSUPPORTED 0x0
+/* Resulting MC frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_OFST 16
+#define       MC_CMD_SET_CLOCK_OUT_MC_FREQ_LEN 4
+/* enum: The MC clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_MC_DOMAIN_UNSUPPORTED 0x0
+/* Resulting rmon frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_OFST 20
+#define       MC_CMD_SET_CLOCK_OUT_RMON_FREQ_LEN 4
+/* enum: The rmon clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_RMON_DOMAIN_UNSUPPORTED 0x0
+/* Resulting vswitch frequency in MHz */
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_OFST 24
+#define       MC_CMD_SET_CLOCK_OUT_VSWITCH_FREQ_LEN 4
+/* enum: The vswitch clock domain doesn't exist / isn't controlled */
+#define          MC_CMD_SET_CLOCK_OUT_VSWITCH_DOMAIN_UNSUPPORTED 0x0
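+/* Illustrative sketch, not generated content: every *_DOMAIN_DONT_CHANGE
+ * value is 0, so if MCDI_DECLARE_BUF zero-initialises its buffer (assumed
+ * here), a request only needs to set the domains it wants to change.
+ * Helpers and efx_mcdi_rpc() are assumed from the driver's MCDI layer;
+ * efx and dpcpu_mhz are placeholders.
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLOCK_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_CLOCK_OUT_LEN);
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, SET_CLOCK_IN_DPCPU_FREQ, dpcpu_mhz);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_SET_CLOCK, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), NULL);
+ *
+ * A zero in a response field then indicates that the corresponding clock
+ * domain is unsupported (see the *_DOMAIN_UNSUPPORTED enums above).
+ */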
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+#undef MC_CMD_0xae_PRIVILEGE_CTG
+
+#define MC_CMD_0xae_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define    MC_CMD_DPCPU_RPC_IN_LEN 36
+#define       MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+#define       MC_CMD_DPCPU_RPC_IN_CPU_LEN 4
+/* enum: RxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX0 0x0
+/* enum: TxDPCPU0 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* enum: RxDPCPU1 (Medford only) */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX1 0x3
+/* enum: RxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_RX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x80
+/* enum: TxDPCPU (will be for the calling function; for now, just an alias of
+ * DPCPU_TX0)
+ */
+#define          MC_CMD_DPCPU_RPC_IN_DPCPU_TX 0x81
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define       MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define       MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define        MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define        MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define          MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_OFST 4
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define        MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define       MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define       MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_LEN 4
+/* Register address. */
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+#define       MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_LEN 4
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define    MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define       MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+#define       MC_CMD_DPCPU_RPC_OUT_RC_LEN 4
+/* DATA */
+#define       MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define       MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_OFST 4
+#define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define        MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_OFST 4
+#define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define        MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define       MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_LEN 4
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_LEN 4
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_LEN 4
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+#define       MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+#undef MC_CMD_0xe3_PRIVILEGE_CTG
+
+#define MC_CMD_0xe3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define    MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+#define       MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_LEN 4
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define    MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
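+/* Illustrative sketch, not part of the generated header: a command with an
+ * empty response can pass a NULL output buffer and zero output length to
+ * efx_mcdi_rpc(), which is assumed to accept that. efx and level are
+ * placeholders.
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, level);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ *                      inbuf, sizeof(inbuf), NULL, 0, NULL);
+ */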
+
+
+/***********************************/
+/* MC_CMD_SHMBOOT_OP
+ * Special operations to support (for now) shmboot.
+ */
+#define MC_CMD_SHMBOOT_OP 0xe6
+#undef MC_CMD_0xe6_PRIVILEGE_CTG
+
+#define MC_CMD_0xe6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SHMBOOT_OP_IN msgrequest */
+#define    MC_CMD_SHMBOOT_OP_IN_LEN 4
+/* Identifies the operation to perform */
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_OFST 0
+#define       MC_CMD_SHMBOOT_OP_IN_SHMBOOT_OP_LEN 4
+/* enum: Copy slave_data section to the slave core. (Greenport only) */
+#define          MC_CMD_SHMBOOT_OP_IN_PUSH_SLAVE_DATA 0x0
+
+/* MC_CMD_SHMBOOT_OP_OUT msgresponse */
+#define    MC_CMD_SHMBOOT_OP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_CAP_BLK_READ
+ * Read multiple 64-bit words from capture block memory
+ */
+#define MC_CMD_CAP_BLK_READ 0xe7
+#undef MC_CMD_0xe7_PRIVILEGE_CTG
+
+#define MC_CMD_0xe7_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_CAP_BLK_READ_IN msgrequest */
+#define    MC_CMD_CAP_BLK_READ_IN_LEN 12
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_OFST 0
+#define       MC_CMD_CAP_BLK_READ_IN_CAP_REG_LEN 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_OFST 4
+#define       MC_CMD_CAP_BLK_READ_IN_ADDR_LEN 4
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_OFST 8
+#define       MC_CMD_CAP_BLK_READ_IN_COUNT_LEN 4
+
+/* MC_CMD_CAP_BLK_READ_OUT msgresponse */
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMIN 8
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMAX 248
+#define    MC_CMD_CAP_BLK_READ_OUT_LENMAX_MCDI2 1016
+#define    MC_CMD_CAP_BLK_READ_OUT_LEN(num) (0+8*(num))
+#define    MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(len) (((len)-0)/8)
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_OFST 0
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN 8
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_LO_OFST 0
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_HI_OFST 4
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MINNUM 1
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM 31
+#define       MC_CMD_CAP_BLK_READ_OUT_BUFFER_MAXNUM_MCDI2 127
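+/* Illustrative sketch, not generated content: each returned BUFFER element
+ * is an 8-byte value split into _LO/_HI dwords; it can be reassembled as a
+ * little-endian 64-bit word. MCDI_PTR() and get_unaligned_le64() (from
+ * <asm/unaligned.h>) are assumed to be available; outbuf/outlen come from a
+ * completed efx_mcdi_rpc() call as in the earlier sketches.
+ *
+ *    unsigned int n = MC_CMD_CAP_BLK_READ_OUT_BUFFER_NUM(outlen);
+ *    const u8 *buf = MCDI_PTR(outbuf, CAP_BLK_READ_OUT_BUFFER);
+ *    u64 first = get_unaligned_le64(buf);
+ *
+ * Subsequent elements follow at MC_CMD_CAP_BLK_READ_OUT_BUFFER_LEN (8-byte)
+ * strides, 'n' of them in total.
+ */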
+
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+#undef MC_CMD_0xe8_PRIVILEGE_CTG
+
+#define MC_CMD_0xe8_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define    MC_CMD_DUMP_DO_IN_LEN 52
+#define       MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define       MC_CMD_DUMP_DO_IN_PADDING_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_LEN 4
+#define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define          MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+#define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define          MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define          MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
+/* enum: The uart port this command was received over (if using a uart
+ * transport)
+ */
+#define          MC_CMD_DUMP_DO_IN_UART_PORT_SRC 0xff
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_LEN 4
+#define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define          MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define    MC_CMD_DUMP_DO_OUT_LEN 4
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+#define       MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+#undef MC_CMD_0xe9_PRIVILEGE_CTG
+
+#define MC_CMD_0xe9_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define    MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_LEN 4
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+#define       MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+#undef MC_CMD_0xea_PRIVILEGE_CTG
+
+#define MC_CMD_0xea_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define    MC_CMD_SET_PSU_IN_LEN 12
+#define       MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define       MC_CMD_SET_PSU_IN_PARAM_LEN 4
+#define          MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define       MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define       MC_CMD_SET_PSU_IN_RAIL_LEN 4
+#define          MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define          MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, eg voltage in mV */
+#define       MC_CMD_SET_PSU_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PSU_IN_VALUE_LEN 4
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define    MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+#undef MC_CMD_0xec_PRIVILEGE_CTG
+
+#define MC_CMD_0xec_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define    MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define    MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_PF_LEN 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+#define       MC_CMD_GET_FUNCTION_INFO_OUT_VF_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+#undef MC_CMD_0xed_PRIVILEGE_CTG
+
+#define MC_CMD_0xed_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define    MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define    MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_SEND_DATA
+ * Send checksummed[sic] block of data over the uart. Response is a placeholder
+ * should we wish to make this reliable; currently requests are fire-and-
+ * forget.
+ */
+#define MC_CMD_UART_SEND_DATA 0xee
+#undef MC_CMD_0xee_PRIVILEGE_CTG
+
+#define MC_CMD_0xee_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_SEND_DATA_OUT msgrequest */
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMIN 16
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMAX 252
+#define    MC_CMD_UART_SEND_DATA_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_UART_SEND_DATA_OUT_LEN(num) (16+1*(num))
+#define    MC_CMD_UART_SEND_DATA_OUT_DATA_NUM(len) (((len)-16)/1)
+/* CRC32 over OFFSET, LENGTH, RESERVED, DATA */
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_SEND_DATA_OUT_CHECKSUM_LEN 4
+/* Offset at which to write the data */
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_SEND_DATA_OUT_OFFSET_LEN 4
+/* Length of data */
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_SEND_DATA_OUT_LENGTH_LEN 4
+/* Reserved for future use */
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_SEND_DATA_OUT_RESERVED_LEN 4
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_OFST 16
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_LEN 1
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MINNUM 0
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM 236
+#define       MC_CMD_UART_SEND_DATA_OUT_DATA_MAXNUM_MCDI2 1004
+
+/* MC_CMD_UART_SEND_DATA_IN msgresponse */
+#define    MC_CMD_UART_SEND_DATA_IN_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UART_RECV_DATA
+ * Request checksummed[sic] block of data over the uart. Only a placeholder,
+ * subject to change and not currently implemented.
+ */
+#define MC_CMD_UART_RECV_DATA 0xef
+#undef MC_CMD_0xef_PRIVILEGE_CTG
+
+#define MC_CMD_0xef_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_UART_RECV_DATA_OUT msgrequest */
+#define    MC_CMD_UART_RECV_DATA_OUT_LEN 16
+/* CRC32 over OFFSET, LENGTH, RESERVED */
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_OUT_CHECKSUM_LEN 4
+/* Offset from which to read the data */
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_OFST 4
+#define       MC_CMD_UART_RECV_DATA_OUT_OFFSET_LEN 4
+/* Length of data */
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_OFST 8
+#define       MC_CMD_UART_RECV_DATA_OUT_LENGTH_LEN 4
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_OFST 12
+#define       MC_CMD_UART_RECV_DATA_OUT_RESERVED_LEN 4
+
+/* MC_CMD_UART_RECV_DATA_IN msgresponse */
+#define    MC_CMD_UART_RECV_DATA_IN_LENMIN 16
+#define    MC_CMD_UART_RECV_DATA_IN_LENMAX 252
+#define    MC_CMD_UART_RECV_DATA_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_UART_RECV_DATA_IN_LEN(num) (16+1*(num))
+#define    MC_CMD_UART_RECV_DATA_IN_DATA_NUM(len) (((len)-16)/1)
+/* CRC32 over RESERVED1, RESERVED2, RESERVED3, DATA */
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_OFST 0
+#define       MC_CMD_UART_RECV_DATA_IN_CHECKSUM_LEN 4
+/* Offset at which to write the data */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_OFST 4
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED1_LEN 4
+/* Length of data */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_OFST 8
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED2_LEN 4
+/* Reserved for future use */
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_OFST 12
+#define       MC_CMD_UART_RECV_DATA_IN_RESERVED3_LEN 4
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_OFST 16
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_LEN 1
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MINNUM 0
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM 236
+#define       MC_CMD_UART_RECV_DATA_IN_DATA_MAXNUM_MCDI2 1004
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device's One-Time-Programmable (OTP) fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+#undef MC_CMD_0xf0_PRIVILEGE_CTG
+
+#define MC_CMD_0xf0_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define    MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define       MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+#define       MC_CMD_READ_FUSES_IN_OFFSET_LEN 4
+/* Length of data to read in bytes */
+#define       MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+#define       MC_CMD_READ_FUSES_IN_LENGTH_LEN 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define    MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define    MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define    MC_CMD_READ_FUSES_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_READ_FUSES_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Length of returned OTP data in bytes */
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+#define       MC_CMD_READ_FUSES_OUT_LENGTH_LEN 4
+/* Returned data */
+#define       MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define       MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define       MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define       MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+#define       MC_CMD_READ_FUSES_OUT_DATA_MAXNUM_MCDI2 1016
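+/* Illustrative sketch, not part of the generated header: a byte-granular
+ * variable-length response. The output buffer is sized with _OUT_LEN(num)
+ * for the number of bytes requested, and the MC reports how many bytes it
+ * actually returned in LENGTH. MCDI helpers are assumed from the driver's
+ * mcdi.h; efx and offset are placeholders.
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LEN(16));
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
+ *    MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, 16);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (rc == 0) {
+ *        unsigned int len = MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH);
+ *        const u8 *data = MCDI_PTR(outbuf, READ_FUSES_OUT_DATA);
+ *    }
+ */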
+
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+#undef MC_CMD_0xf1_PRIVILEGE_CTG
+
+#define MC_CMD_0xf1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define    MC_CMD_KR_TUNE_IN_LENMIN 4
+#define    MC_CMD_KR_TUNE_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define          MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define          MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define          MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define          MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define          MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* enum: Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define          MC_CMD_KR_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll KR Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define          MC_CMD_KR_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Read Figure Of Merit (eye quality, higher is better). */
+#define          MC_CMD_KR_TUNE_IN_READ_FOM 0x7
+/* enum: Start/stop link training frames */
+#define          MC_CMD_KR_TUNE_IN_LINK_TRAIN_RUN 0x8
+/* enum: Issue KR link training command (control training coefficients) */
+#define          MC_CMD_KR_TUNE_IN_LINK_TRAIN_CMD 0x9
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+#define       MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM_MCDI2 254
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_OUT_LEN 0
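+/* Illustrative sketch, not generated content: MC_CMD_KR_TUNE multiplexes
+ * several sub-operations through the one-byte KR_TUNE_OP field; the three
+ * RSVD bytes pad the header to a 32-bit boundary, and the chosen
+ * sub-operation selects which of the more specific IN/OUT layouts below
+ * applies. Assuming MCDI_PTR() yields a byte pointer into the buffer and
+ * the usual helpers/efx_mcdi_rpc() are available:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX);
+ *    size_t outlen;
+ *    int rc;
+ *
+ *    *MCDI_PTR(inbuf, KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP) =
+ *        MC_CMD_KR_TUNE_IN_RXEQ_GET;
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ */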
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
+/* RXEQ Parameter */
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15, Huntington) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15, Huntington) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+/* enum: Edge DFE DLEV (0-128 for Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+/* enum: CTLE gain (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_GAIN 0xb
+/* enum: CTLE pole (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_POLE 0xc
+/* enum: CTLE peaking (0-31, Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_PEAK 0xd
+/* enum: DFE Tap1 - even path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_EVEN 0xe
+/* enum: DFE Tap1 - odd path (Medford2 - 6 bit signed (-29 - +29)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_ODD 0xf
+/* enum: DFE Tap2 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x10
+/* enum: DFE Tap3 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x11
+/* enum: DFE Tap4 (Medford2 - 6 bit signed (-20 - +20)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x12
+/* enum: DFE Tap5 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x13
+/* enum: DFE Tap6 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP6 0x14
+/* enum: DFE Tap7 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP7 0x15
+/* enum: DFE Tap8 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP8 0x16
+/* enum: DFE Tap9 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP9 0x17
+/* enum: DFE Tap10 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP10 0x18
+/* enum: DFE Tap11 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP11 0x19
+/* enum: DFE Tap12 (Medford2 - 6 bit signed (-24 - +24)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP12 0x1a
+/* enum: I/Q clk offset (Medford2 - 4 bit signed (-5 - +5)) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_IQ_OFF 0x1b
+/* enum: Negative h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_EVEN 0x1c
+/* enum: Negative h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1N_OFF_ODD 0x1d
+/* enum: Positive h1 polarity data sampler offset calibration code, even path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_EVEN 0x1e
+/* enum: Positive h1 polarity data sampler offset calibration code, odd path
+ * (Medford2 - 6 bit signed (-29 - +29))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_H1P_OFF_ODD 0x1f
+/* enum: CDR calibration loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_PVT 0x20
+/* enum: CDR integral loop code (Medford2) */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_CDR_INTEG 0x21
+/* enum: CTLE Boost stages - retimer lineside (Medford2 with DS250x retimer - 4
+ * stages, 2 bits per stage)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_LS 0x22
+/* enum: DFE Tap1 - retimer lineside (Medford2 with DS250x retimer (-31 - 31))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_LS 0x23
+/* enum: DFE Tap2 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_LS 0x24
+/* enum: DFE Tap3 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_LS 0x25
+/* enum: DFE Tap4 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_LS 0x26
+/* enum: DFE Tap5 - retimer lineside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_LS 0x27
+/* enum: CTLE Boost stages - retimer hostside (Medford2 with DS250x retimer - 4
+ * stages, 2 bits per stage)
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST_RT_HS 0x28
+/* enum: DFE Tap1 - retimer hostside (Medford2 with DS250x retimer (-31 - 31))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP1_RT_HS 0x29
+/* enum: DFE Tap2 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP2_RT_HS 0x2a
+/* enum: DFE Tap3 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP3_RT_HS 0x2b
+/* enum: DFE Tap4 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP4_RT_HS 0x2c
+/* enum: DFE Tap5 - retimer hostside (Medford2 with DS250x retimer (-15 - 15))
+ */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_DFE_TAP5_RT_HS 0x2d
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define          MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define        MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
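+/* Illustrative sketch, not part of the generated header: unpacking each
+ * returned PARAM dword into its ID/LANE/INITIAL/CURRENT sub-fields with the
+ * _LBN/_WIDTH defines above. MCDI_PTR() and get_unaligned_le32() (from
+ * <asm/unaligned.h>) are assumed available; outbuf/outlen come from the
+ * RXEQ_GET request shown after MC_CMD_KR_TUNE_OUT.
+ *
+ *    unsigned int i, n = MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_NUM(outlen);
+ *    const u8 *p = MCDI_PTR(outbuf, KR_TUNE_RXEQ_GET_OUT_PARAM);
+ *
+ *    for (i = 0; i < n; i++) {
+ *        u32 param = get_unaligned_le32(p +
+ *                i * MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN);
+ *        u8 id   = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN) & 0xff;
+ *        u8 lane = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN) & 0x7;
+ *        u8 init = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN) & 0xff;
+ *        u8 curr = (param >> MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN) & 0xff;
+ *    }
+ */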
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define        MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_TXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_TXEQ_GET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
+/* TXEQ Parameter */
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TX Amplitude (Huntington, Medford, Medford2) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
+/* enum: De-Emphasis Tap1 Fine */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
+/* enum: Pre-Emphasis Magnitude (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
+/* enum: Pre-Emphasis Fine (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
+/* enum: TX Slew Rate Coarse control (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
+/* enum: TX Slew Rate Fine control (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
+/* enum: TX Termination Impedance control (Huntington) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-cursor Tap (Medford, Medford2) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: Post-cursor Tap (Medford, Medford2) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
+/* enum: TX Amplitude (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_LS 0xd
+/* enum: Pre-cursor Tap (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_LS 0xe
+/* enum: Post-cursor Tap (Retimer Lineside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_LS 0xf
+/* enum: TX Amplitude (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_RT_HS 0x10
+/* enum: Pre-cursor Tap (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV_RT_HS 0x11
+/* enum: Post-cursor Tap (Retimer Hostside) */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY_RT_HS 0x12
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define          MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_LBN 11
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_OFST 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_OFST 0
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_LBN 24
+#define        MC_CMD_KR_TUNE_TXEQ_GET_OUT_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_IN msgrequest */
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMIN 8
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* TXEQ Parameter */
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_OFST 4
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LEN 4
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MINNUM 1
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_OFST 4
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_LBN 0
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_OFST 4
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_LBN 8
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/*             Enum values, see field(s): */
+/*                MC_CMD_KR_TUNE_TXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_OFST 4
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_LBN 11
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED_WIDTH 5
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_OFST 4
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_OFST 4
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_LBN 24
+#define        MC_CMD_KR_TUNE_TXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_TXEQ_SET_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_TXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define    MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+/* Port-relative lane to scan eye on */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN msgrequest */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LEN 12
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_LEN 4
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_OFST 4
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_LBN 0
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_NUM_WIDTH 8
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_OFST 4
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_LBN 31
+#define        MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_LANE_ABS_REL_WIDTH 1
+/* Scan duration / cycle count */
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_OFST 8
+#define       MC_CMD_KR_TUNE_START_EYE_PLOT_V2_IN_BER_LEN 4
+
+/* MC_CMD_KR_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+#define       MC_CMD_KR_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
+
+/* MC_CMD_KR_TUNE_READ_FOM_IN msgrequest */
+#define    MC_CMD_KR_TUNE_READ_FOM_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_READ_FOM_IN_LANE_LEN 4
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_OFST 4
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_LBN 0
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_NUM_WIDTH 8
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_OFST 4
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_LBN 31
+#define        MC_CMD_KR_TUNE_READ_FOM_IN_LANE_ABS_REL_WIDTH 1
+
+/* MC_CMD_KR_TUNE_READ_FOM_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_READ_FOM_OUT_LEN 4
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_OFST 0
+#define       MC_CMD_KR_TUNE_READ_FOM_OUT_FOM_LEN 4
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN msgrequest */
+#define    MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_OFST 4
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_RUN_LEN 4
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_STOP 0x0 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_RUN_IN_START 0x1 /* enum */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN msgrequest */
+#define    MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN 28
+/* Requested operation */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_OFST 0
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_OFST 1
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_RSVD_LEN 3
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_OFST 4
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LANE_LEN 4
+/* Set INITIALIZE state */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_OFST 8
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_INITIALIZE_LEN 4
+/* Set PRESET state */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_OFST 12
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_PRESET_LEN 4
+/* C(-1) request */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_OFST 16
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CM1_LEN 4
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD 0x0 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT 0x1 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_DECREMENT 0x2 /* enum */
+/* C(0) request */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_OFST 20
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_C0_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) request */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_OFST 24
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_CP1_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+
+/* MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT msgresponse */
+#define    MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN 24
+/* C(-1) status */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_OFST 0
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_STATUS_LEN 4
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_NOT_UPDATED 0x0 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_UPDATED 0x1 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MINIMUM 0x2 /* enum */
+#define          MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_STATUS_MAXIMUM 0x3 /* enum */
+/* C(0) status */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_OFST 4
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_STATUS_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(+1) status */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_OFST 8
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN/CM1 */
+/* C(-1) value */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_OFST 12
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CM1_VALUE_LEN 4
+/* C(0) value */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_OFST 16
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_C0_VALUE_LEN 4
+/* C(+1) value */
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_OFST 20
+#define       MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_VALUE_LEN 4
+
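+/* Usage sketch (editor's illustration, not part of the original header):
+ * requesting a C(+1) coefficient increment on lane 0 via the LINK_TRAIN_CMD
+ * sub-operation. "op" stands for the LINK_TRAIN_CMD opcode from the
+ * MC_CMD_KR_TUNE_IN operation enum earlier in this file (not in this hunk);
+ * the MCDI helpers are assumed to be those from the shared sfc mcdi.h, and
+ * INITIALIZE/PRESET are left clear by the zero-initialised buffer.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN);
+ *   unsigned int cp1_status;
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   *MCDI_PTR(inbuf, KR_TUNE_LINK_TRAIN_CMD_IN_KR_TUNE_OP) = op;
+ *   MCDI_SET_DWORD(inbuf, KR_TUNE_LINK_TRAIN_CMD_IN_LANE, 0);
+ *   MCDI_SET_DWORD(inbuf, KR_TUNE_LINK_TRAIN_CMD_IN_CM1,
+ *                  MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);
+ *   MCDI_SET_DWORD(inbuf, KR_TUNE_LINK_TRAIN_CMD_IN_C0,
+ *                  MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_HOLD);
+ *   MCDI_SET_DWORD(inbuf, KR_TUNE_LINK_TRAIN_CMD_IN_CP1,
+ *                  MC_CMD_KR_TUNE_LINK_TRAIN_CMD_IN_REQ_INCREMENT);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_KR_TUNE, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 && outlen >= MC_CMD_KR_TUNE_LINK_TRAIN_CMD_OUT_LEN)
+ *           cp1_status = MCDI_DWORD(outbuf,
+ *                                   KR_TUNE_LINK_TRAIN_CMD_OUT_CP1_STATUS);
+ */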
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+#undef MC_CMD_0xf2_PRIVILEGE_CTG
+
+#define MC_CMD_0xf2_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define    MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define          MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define          MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define          MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define          MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* enum: Start PCIe Serdes Eye diagram plot on a given lane. */
+#define          MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT 0x5
+/* enum: Poll PCIe Serdes Eye diagram plot. Returns one row of BER data. The
+ * caller should call this command repeatedly after starting eye plot, until no
+ * more data is returned.
+ */
+#define          MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT 0x6
+/* enum: Enable the SERDES BIST and set it to generate a 200MHz square wave */
+#define          MC_CMD_PCIE_TUNE_IN_BIST_SQUARE_WAVE 0x7
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+#define       MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM_MCDI2 254
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
+/* RXEQ Parameter */
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_OFST 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_OFST 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define          MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_OFST 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_OFST 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_OFST 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define        MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_NUM(len) (((len)-4)/4)
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define       MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM_MCDI2 254
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/*             Enum values, see field(s): */
+/*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/*             Enum values, see field(s): */
+/*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_OFST 4
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define        MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_NUM(len) (((len)-0)/4)
+/* TXEQ Parameter */
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define       MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM_MCDI2 255
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_OFST 0
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define          MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_OFST 0
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/*             Enum values, see field(s): */
+/*                MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_OFST 0
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_OFST 0
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define        MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN 8
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_OFST 4
+#define       MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LANE_LEN 4
+
+/* MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_START_EYE_PLOT_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN 4
+/* Requested operation */
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_OFST 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_OFST 1
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMIN 0
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX 252
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define    MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_NUM(len) (((len)-0)/2)
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+#define       MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_SAMPLES_MAXNUM_MCDI2 510
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN msgrequest */
+#define    MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_IN_LEN 0
+
+/* MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT msgresponse */
+#define    MC_CMD_PCIE_TUNE_BIST_SQUARE_WAVE_OUT_LEN 0
+
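+/* Usage sketch (editor's illustration, not part of the original header):
+ * starting a PCIe SERDES eye plot on lane 0 and then polling until the
+ * firmware returns an empty response (each 16-bit SAMPLES entry in the poll
+ * output is one BER measurement). MCDI helpers as in the shared sfc mcdi.h
+ * are assumed; error handling is abbreviated.
+ *
+ *   MCDI_DECLARE_BUF(startbuf, MC_CMD_PCIE_TUNE_START_EYE_PLOT_IN_LEN);
+ *   MCDI_DECLARE_BUF(pollbuf, MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_PCIE_TUNE_POLL_EYE_PLOT_OUT_LENMAX);
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   *MCDI_PTR(startbuf, PCIE_TUNE_START_EYE_PLOT_IN_PCIE_TUNE_OP) =
+ *           MC_CMD_PCIE_TUNE_IN_START_EYE_PLOT;
+ *   MCDI_SET_DWORD(startbuf, PCIE_TUNE_START_EYE_PLOT_IN_LANE, 0);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_PCIE_TUNE, startbuf, sizeof(startbuf),
+ *                     NULL, 0, NULL);
+ *
+ *   *MCDI_PTR(pollbuf, PCIE_TUNE_POLL_EYE_PLOT_IN_PCIE_TUNE_OP) =
+ *           MC_CMD_PCIE_TUNE_IN_POLL_EYE_PLOT;
+ *   do {
+ *           rc = efx_mcdi_rpc(efx, MC_CMD_PCIE_TUNE, pollbuf,
+ *                             sizeof(pollbuf), outbuf, sizeof(outbuf),
+ *                             &outlen);
+ *   } while (rc == 0 && outlen != 0);
+ */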
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
+ */
+#define MC_CMD_LICENSING 0xf3
+#undef MC_CMD_0xf3_PRIVILEGE_CTG
+
+#define MC_CMD_0xf3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define    MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define       MC_CMD_LICENSING_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define          MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define          MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define    MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+#define       MC_CMD_LICENSING_OUT_VALID_APP_KEYS_LEN 4
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+#define       MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being blacklisted */
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+#define       MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being unverifiable */
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+#define       MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_LEN 4
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+#define       MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+#define       MC_CMD_LICENSING_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+#define       MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define          MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define          MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
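+/* Usage sketch (editor's illustration, not part of the original header):
+ * issuing MC_CMD_LICENSING with OP_GET_KEY_STATS and reading back the count
+ * of valid application keys. The MCDI helpers (MCDI_DECLARE_BUF,
+ * MCDI_SET_DWORD, MCDI_DWORD, efx_mcdi_rpc) are assumed to be those from the
+ * shared sfc mcdi.h; error handling is abbreviated.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
+ *   unsigned int valid_keys;
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
+ *                  MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 && outlen >= MC_CMD_LICENSING_OUT_LEN)
+ *           valid_keys = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
+ */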
+
+/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+#undef MC_CMD_0xd0_PRIVILEGE_CTG
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define    MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define       MC_CMD_LICENSING_V3_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_IN_OP_LEN 4
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define          MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses. Returns EAGAIN if license
+ * processing (updating) has been started but not yet completed.
+ */
+#define          MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define    MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+#define       MC_CMD_LICENSING_V3_OUT_VALID_KEYS_LEN 4
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+#define       MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_LEN 4
+/* count of keys which are invalid due to being unverifiable */
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+#define       MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_LEN 4
+/* count of keys which are invalid due to being for the wrong node */
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+#define       MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_LEN 4
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_LEN 4
+/* licensing subsystem self-test report (for manftest) */
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+#define       MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_LEN 4
+/* enum: licensing subsystem self-test failed */
+#define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define          MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define       MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define       MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define       MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define       MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define       MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
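+/* Usage sketch (editor's illustration, not part of the original header):
+ * reading the V3 licensed-application bitmask. As noted above, the firmware
+ * may return EAGAIN while a license update is still in progress, so callers
+ * may need to retry. Helpers as in the shared sfc mcdi.h are assumed.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
+ *   size_t outlen;
+ *   u64 apps;
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
+ *                  MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
+ *                     outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 && outlen >= MC_CMD_LICENSING_V3_OUT_LEN)
+ *           apps = MCDI_QWORD(outbuf, LICENSING_V3_OUT_LICENSED_APPS);
+ */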
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+#undef MC_CMD_0xd1_PRIVILEGE_CTG
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define    MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+#define    MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_NUM(len) (((len)-8)/1)
+/* type of license (e.g. 3) */
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_LEN 4
+/* length of the license ID (in bytes) */
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_LEN 4
+/* the unique license ID of the adapter */
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+#define       MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM_MCDI2 1012
+
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
+#undef MC_CMD_0xf4_PRIVILEGE_CTG
+
+#define MC_CMD_0xf4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_MC2MC_PROXY_IN msgrequest */
+#define    MC_CMD_MC2MC_PROXY_IN_LEN 0
+
+/* MC_CMD_MC2MC_PROXY_OUT msgresponse */
+#define    MC_CMD_MC2MC_PROXY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
+ * or a reboot of the MC.) Not used for V3 licensing
+ */
+#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
+#undef MC_CMD_0xf5_PRIVILEGE_CTG
+
+#define MC_CMD_0xf5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_APP_STATE_IN msgrequest */
+#define    MC_CMD_GET_LICENSED_APP_STATE_IN_LEN 4
+/* application ID to query (LICENSED_APP_ID_xxx) */
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_IN_APP_ID_LEN 4
+
+/* MC_CMD_GET_LICENSED_APP_STATE_OUT msgresponse */
+#define    MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define          MC_CMD_GET_LICENSED_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define          MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED 0x1
+
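+/* Usage sketch (editor's illustration, not part of the original header):
+ * checking whether one (pre-V3) licensed application is currently enabled.
+ * app_id is a caller-supplied LICENSED_APP_ID_xxx value; helpers as in the
+ * shared sfc mcdi.h are assumed.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_APP_STATE_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_APP_STATE_OUT_LEN);
+ *   size_t outlen;
+ *   bool licensed;
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, GET_LICENSED_APP_STATE_IN_APP_ID, app_id);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_APP_STATE, inbuf,
+ *                     sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *   licensed = rc == 0 &&
+ *              MCDI_DWORD(outbuf, GET_LICENSED_APP_STATE_OUT_STATE) ==
+ *              MC_CMD_GET_LICENSED_APP_STATE_OUT_LICENSED;
+ */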
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+#undef MC_CMD_0xd2_PRIVILEGE_CTG
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define    MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define    MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_LEN 4
+/* enum: no (or invalid) license is present for the application */
+#define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define          MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+#undef MC_CMD_0xd3_PRIVILEGE_CTG
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define    MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define    MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define       MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
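+/* Usage sketch (editor's illustration, not part of the original header):
+ * querying a set of V3 licensed features in one call: the 64-bit request
+ * mask selects the features of interest and the response has the
+ * corresponding bits set only where a license is present. feature_mask is
+ * caller-supplied; helpers as in the shared sfc mcdi.h are assumed.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN);
+ *   size_t outlen;
+ *   u64 states = 0;
+ *   int rc;
+ *
+ *   MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES,
+ *                  feature_mask);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_FEATURE_STATES, inbuf,
+ *                     sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
+ *   if (rc == 0 && outlen >= MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN)
+ *           states = MCDI_QWORD(outbuf,
+ *                               GET_LICENSED_V3_FEATURE_STATES_OUT_STATES);
+ */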
+
+/***********************************/
+/* MC_CMD_LICENSED_APP_OP
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
+ */
+#define MC_CMD_LICENSED_APP_OP 0xf6
+#undef MC_CMD_0xf6_PRIVILEGE_CTG
+
+#define MC_CMD_0xf6_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_APP_OP_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMIN 8
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_LICENSED_APP_OP_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_LICENSED_APP_OP_IN_ARGS_NUM(len) (((len)-8)/4)
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_IN_OP_LEN 4
+/* enum: validate application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_VALIDATE 0x0
+/* enum: mask application */
+#define          MC_CMD_LICENSED_APP_OP_IN_OP_MASK 0x1
+/* arguments specific to this particular operation */
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_LEN 4
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MINNUM 0
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM 61
+#define       MC_CMD_LICENSED_APP_OP_IN_ARGS_MAXNUM_MCDI2 253
+
+/* MC_CMD_LICENSED_APP_OP_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMIN 0
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMAX 252
+#define    MC_CMD_LICENSED_APP_OP_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_LICENSED_APP_OP_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_LICENSED_APP_OP_OUT_RESULT_NUM(len) (((len)-0)/4)
+/* result specific to this particular operation */
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_LEN 4
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MINNUM 0
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM 63
+#define       MC_CMD_LICENSED_APP_OP_OUT_RESULT_MAXNUM_MCDI2 255
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_VALIDATE_IN_LEN 72
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_OP_LEN 4
+/* validation challenge */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_VALIDATE_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_LEN 68
+/* feature expiry (time_t) */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_EXPIRY_LEN 4
+/* validation response */
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_VALIDATE_OUT_RESPONSE_LEN 64
+
+/* MC_CMD_LICENSED_APP_OP_MASK_IN msgrequest */
+#define    MC_CMD_LICENSED_APP_OP_MASK_IN_LEN 12
+/* application ID */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_OFST 0
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_APP_ID_LEN 4
+/* the type of operation requested */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_OFST 4
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_OP_LEN 4
+/* flag */
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_APP_OP_MASK_IN_FLAG_LEN 4
+
+/* MC_CMD_LICENSED_APP_OP_MASK_OUT msgresponse */
+#define    MC_CMD_LICENSED_APP_OP_MASK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+#undef MC_CMD_0xd4_PRIVILEGE_CTG
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define    MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 56
+/* challenge for validation (384 bits) */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 0
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 48
+/* application ID expressed as a single bit mask */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 48
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 48
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 52
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define    MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 116
+/* validation response to the challenge in the form of an ECDSA signature
+ * consisting of two 384-bit integers, r and s, in big-endian order. The
+ * signature signs a SHA-384 digest of a message constructed from the
+ * concatenation of the input message and the remaining fields of this output
+ * message, e.g. challenge[48 bytes] ... expiry_time[4 bytes] ...
+ */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 0
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 96
+/* application expiry time */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 96
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_LEN 4
+/* application expiry units */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 100
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_LEN 4
+/* enum: expiry units are accounting units */
+#define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define          MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* base MAC address of the NIC stored in NVRAM (note that this is a constant
+ * value for a given NIC regardless of which function is calling; effectively
+ * this is the PF0 base MAC address)
+ */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_OFST 104
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_BASE_MACADDR_LEN 6
+/* MAC address of the v-adaptor associated with the client. If no such
+ * v-adaptor exists, the field is filled with 0xFF.
+ */
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_OFST 110
+#define       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_VADAPTOR_MACADDR_LEN 6
+
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+#undef MC_CMD_0xd5_PRIVILEGE_CTG
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define    MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+#define       MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_LEN 4
+/* enum: turn the features off */
+#define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define          MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define    MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LICENSING_V3_TEMPORARY
+ * Perform operations to support installation of a single temporary license in
+ * the adapter, in addition to those found in the licensing partition. See
+ * SF-116124-SW for an overview of how this could be used. The license is
+ * stored in MC persistent data and so will survive an MC reboot, but will be
+ * erased when the adapter is power cycled
+ */
+#define MC_CMD_LICENSING_V3_TEMPORARY 0xd6
+#undef MC_CMD_0xd6_PRIVILEGE_CTG
+
+#define MC_CMD_0xd6_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN msgrequest */
+#define    MC_CMD_LICENSING_V3_TEMPORARY_IN_LEN 4
+/* operation code */
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_OP_LEN 4
+/* enum: install a new license, overwriting any existing temporary license.
+ * This is an asynchronous operation owing to the time taken to validate an
+ * ECDSA license
+ */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_SET 0x0
+/* enum: clear the license immediately rather than waiting for the next power
+ * cycle
+ */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_CLEAR 0x1
+/* enum: get the status of the asynchronous MC_CMD_LICENSING_V3_TEMPORARY_SET
+ * operation
+ */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS 0x2
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_SET msgrequest */
+#define    MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LEN 164
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_OP_LEN 4
+/* ECDSA license and signature */
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_OFST 4
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_SET_LICENSE_LEN 160
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR msgrequest */
+#define    MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_LEN 4
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_CLEAR_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS msgrequest */
+#define    MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN 4
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_OP_LEN 4
+
+/* MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS msgresponse */
+#define    MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN 12
+/* status code */
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_OFST 0
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS_LEN 4
+/* enum: finished validating and installing license */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_OK 0x0
+/* enum: license validation and installation in progress */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS 0x1
+/* enum: licensing error. More specific error messages are not provided to
+ * avoid exposing details of the licensing system to the client
+ */
+#define          MC_CMD_LICENSING_V3_TEMPORARY_STATUS_ERROR 0x2
+/* bitmask of licensed features */
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_OFST 4
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LEN 8
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_LO_OFST 4
+#define       MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LICENSED_FEATURES_HI_OFST 8
+
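+/* Usage sketch (editor's illustration, not part of the original header):
+ * because the SET sub-operation validates an ECDSA license asynchronously,
+ * a caller would issue SET and then poll with the STATUS sub-operation
+ * until the reported state is no longer IN_PROGRESS. Helpers as in the
+ * shared sfc mcdi.h are assumed; retry pacing is left to the caller.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_TEMPORARY_IN_STATUS_LEN);
+ *   MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_TEMPORARY_OUT_STATUS_LEN);
+ *   unsigned int status;
+ *   size_t outlen;
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, LICENSING_V3_TEMPORARY_IN_STATUS_OP,
+ *                  MC_CMD_LICENSING_V3_TEMPORARY_STATUS);
+ *   do {
+ *           rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_V3_TEMPORARY, inbuf,
+ *                             sizeof(inbuf), outbuf, sizeof(outbuf),
+ *                             &outlen);
+ *           status = MCDI_DWORD(outbuf,
+ *                               LICENSING_V3_TEMPORARY_OUT_STATUS_STATUS);
+ *   } while (rc == 0 &&
+ *            status == MC_CMD_LICENSING_V3_TEMPORARY_STATUS_IN_PROGRESS);
+ */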
+
+/***********************************/
+/* MC_CMD_SET_PORT_SNIFF_CONFIG
+ * Configure RX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic delivered to the host (non-promiscuous
+ * mode) or all traffic arriving at the port (promiscuous mode) may be
+ * delivered to a specific queue, or a set of queues with RSS.
+ */
+#define MC_CMD_SET_PORT_SNIFF_CONFIG 0xf7
+#undef MC_CMD_0xf7_PRIVILEGE_CTG
+
+#define MC_CMD_0xf7_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_OFST 0
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_LBN 1
+#define        MC_CMD_SET_PORT_SNIFF_CONFIG_IN_PROMISCUOUS_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
+
+/* MC_CMD_SET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PORT_SNIFF_CONFIG_OUT_LEN 0
+
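+/* Usage sketch (editor's illustration, not part of the original header):
+ * enabling non-promiscuous RX port sniffing to a single queue. The ENABLE
+ * flag is built by shifting with its _LBN value; RX_CONTEXT is left at zero
+ * since it is only used with RX_MODE_RSS. rxq is a caller-supplied queue
+ * handle and the helpers are assumed to be those from the shared sfc mcdi.h.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PORT_SNIFF_CONFIG_IN_LEN);
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_FLAGS,
+ *                  1 << MC_CMD_SET_PORT_SNIFF_CONFIG_IN_ENABLE_LBN);
+ *   MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_QUEUE, rxq);
+ *   MCDI_SET_DWORD(inbuf, SET_PORT_SNIFF_CONFIG_IN_RX_MODE,
+ *                  MC_CMD_SET_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_SET_PORT_SNIFF_CONFIG, inbuf,
+ *                     sizeof(inbuf), NULL, 0, NULL);
+ */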
+
+/***********************************/
+/* MC_CMD_GET_PORT_SNIFF_CONFIG
+ * Obtain the current RX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_PORT_SNIFF_CONFIG 0xf8
+#undef MC_CMD_0xf8_PRIVILEGE_CTG
+
+#define MC_CMD_0xf8_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_OFST 0
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_LBN 1
+#define        MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_PROMISCUOUS_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SET_PARSER_DISP_CONFIG
+ * Change configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_SET_PARSER_DISP_CONFIG 0xf9
+#undef MC_CMD_0xf9_PRIVILEGE_CTG
+
+#define MC_CMD_0xf9_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMIN 12
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX 252
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(num) (8+4*(num))
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_NUM(len) (((len)-8)/4)
+/* the type of configuration setting to change */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/* enum: Per-TXQ enable for multicast UDP destination lookup for possible
+ * internal loopback. (ENTITY is a queue handle, VALUE is a single boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN 0x0
+/* enum: Per-v-adaptor enable for suppression of self-transmissions on the
+ * internal loopback path. (ENTITY is an EVB_PORT_ID, VALUE is a single
+ * boolean.)
+ */
+#define          MC_CMD_SET_PARSER_DISP_CONFIG_IN_VADAPTOR_SUPPRESS_SELF_TX 0x1
+/* handle for the entity to update: queue handle, EVB port ID, etc. depending
+ * on the type of configuration setting being changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+/* new value: the details depend on the type of configuration setting being
+ * changed
+ */
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_OFST 8
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_LEN 4
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MINNUM 1
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM 61
+#define       MC_CMD_SET_PARSER_DISP_CONFIG_IN_VALUE_MAXNUM_MCDI2 253
+
+/* MC_CMD_SET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_PARSER_DISP_CONFIG_OUT_LEN 0
+
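+/* Usage sketch (editor's illustration, not part of the original header):
+ * setting one boolean parser-dispatcher option, here enabling multicast UDP
+ * destination lookup on a single TX queue. A single VALUE dword gives the
+ * minimum request length; txq_handle is caller-supplied and the helpers are
+ * assumed to be those from the shared sfc mcdi.h.
+ *
+ *   MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_PARSER_DISP_CONFIG_IN_LEN(1));
+ *   int rc;
+ *
+ *   MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_TYPE,
+ *                  MC_CMD_SET_PARSER_DISP_CONFIG_IN_TXQ_MCAST_UDP_DST_LOOKUP_EN);
+ *   MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_ENTITY, txq_handle);
+ *   MCDI_SET_DWORD(inbuf, SET_PARSER_DISP_CONFIG_IN_VALUE, 1);
+ *   rc = efx_mcdi_rpc(efx, MC_CMD_SET_PARSER_DISP_CONFIG, inbuf,
+ *                     sizeof(inbuf), NULL, 0, NULL);
+ */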
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_CONFIG
+ * Read configuration related to the parser-dispatcher subsystem.
+ */
+#define MC_CMD_GET_PARSER_DISP_CONFIG 0xfa
+#undef MC_CMD_0xfa_PRIVILEGE_CTG
+
+#define MC_CMD_0xfa_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_IN_LEN 8
+/* the type of configuration setting to read */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_SET_PARSER_DISP_CONFIG/MC_CMD_SET_PARSER_DISP_CONFIG_IN/TYPE */
+/* handle for the entity to query: queue handle, EVB port ID, etc. depending on
+ * the type of configuration setting being read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_OFST 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_IN_ENTITY_LEN 4
+
+/* MC_CMD_GET_PARSER_DISP_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMIN 4
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX 252
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_LEN(num) (0+4*(num))
+#define    MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_NUM(len) (((len)-0)/4)
+/* current value: the details depend on the type of configuration setting being
+ * read
+ */
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_OFST 0
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_LEN 4
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MINNUM 1
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM 63
+#define       MC_CMD_GET_PARSER_DISP_CONFIG_OUT_VALUE_MAXNUM_MCDI2 255
+
+
+/***********************************/
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG
+ * Configure TX port sniffing for the physical port associated with the calling
+ * function. Only a privileged function may change the port sniffing
+ * configuration. A copy of all traffic transmitted through the port may be
+ * delivered to a specific queue, or a set of queues with RSS. Note that these
+ * packets are delivered with transmit timestamps in the packet prefix, not
+ * receive timestamps, so it is likely that the queue(s) will need to be
+ * dedicated as TX sniff receivers.
+ */
+#define MC_CMD_SET_TX_PORT_SNIFF_CONFIG 0xfb
+#undef MC_CMD_0xfb_PRIVILEGE_CTG
+
+#define MC_CMD_0xfb_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_LEN 16
+/* configuration flags */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_FLAGS_LEN 4
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_OFST 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_LBN 0
+#define        MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_ENABLE_WIDTH 1
+/* receive queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_OFST 4
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_OFST 8
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_LEN 4
+/* enum: receive to just the specified queue */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define          MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) as returned by MC_CMD_RSS_CONTEXT_ALLOC. Note
+ * that these handles should be considered opaque to the host, although a value
+ * of 0xFFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_OFST 12
+#define       MC_CMD_SET_TX_PORT_SNIFF_CONFIG_IN_RX_CONTEXT_LEN 4
+
+/* MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_SET_TX_PORT_SNIFF_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG
+ * Obtain the current TX port sniffing configuration for the physical port
+ * associated with the calling function. Only a privileged function may read
+ * the configuration.
+ */
+#define MC_CMD_GET_TX_PORT_SNIFF_CONFIG 0xfc
+#undef MC_CMD_0xfc_PRIVILEGE_CTG
+
+#define MC_CMD_0xfc_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN msgrequest */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT msgresponse */
+#define    MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_LEN 16
+/* configuration flags */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_FLAGS_LEN 4
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_OFST 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_LBN 0
+#define        MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_ENABLE_WIDTH 1
+/* receiving queue handle (for RSS mode, this is the base queue) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_OFST 4
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_QUEUE_LEN 4
+/* receive mode */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_OFST 8
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_LEN 4
+/* enum: receiving to just the specified queue */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_SIMPLE 0x0
+/* enum: receiving to multiple queues using RSS context */
+#define          MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_MODE_RSS 0x1
+/* RSS context (for RX_MODE_RSS) */
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_OFST 12
+#define       MC_CMD_GET_TX_PORT_SNIFF_CONFIG_OUT_RX_CONTEXT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_RMON_STATS_RX_ERRORS
+ * Per queue rx error stats.
+ */
+#define MC_CMD_RMON_STATS_RX_ERRORS 0xfe
+#undef MC_CMD_0xfe_PRIVILEGE_CTG
+
+#define MC_CMD_0xfe_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_IN msgrequest */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_IN_LEN 8
+/* The rx queue to get stats for. */
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_RX_QUEUE_LEN 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_IN_FLAGS_LEN 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_OFST 4
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_LBN 0
+#define        MC_CMD_RMON_STATS_RX_ERRORS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_STATS_RX_ERRORS_OUT msgresponse */
+#define    MC_CMD_RMON_STATS_RX_ERRORS_OUT_LEN 16
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_OFST 0
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_CRC_ERRORS_LEN 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_OFST 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_TRUNC_ERRORS_LEN 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_OFST 8
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_NO_DESC_DROPS_LEN 4
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_OFST 12
+#define       MC_CMD_RMON_STATS_RX_ERRORS_OUT_RX_ABORT_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_PCIE_RESOURCE_INFO
+ * Find out about available PCIE resources
+ */
+#define MC_CMD_GET_PCIE_RESOURCE_INFO 0xfd
+#undef MC_CMD_0xfd_PRIVILEGE_CTG
+
+#define MC_CMD_0xfd_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_IN msgrequest */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_IN_LEN 0
+
+/* MC_CMD_GET_PCIE_RESOURCE_INFO_OUT msgresponse */
+#define    MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_LEN 28
+/* The maximum number of PFs the device can expose */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_OFST 0
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PFS_LEN 4
+/* The maximum number of VFs the device can expose in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_OFST 4
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VFS_LEN 4
+/* The maximum number of MSI-X vectors the device can provide in total */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_OFST 8
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VECTORS_LEN 4
+/* the number of MSI-X vectors the device will allocate by default to each PF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_OFST 12
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_PF_VECTORS_LEN 4
+/* the number of MSI-X vectors the device will allocate by default to each VF
+ */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_OFST 16
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_DEFAULT_VF_VECTORS_LEN 4
+/* the maximum number of MSI-X vectors the device can allocate to any one PF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_OFST 20
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_PF_VECTORS_LEN 4
+/* the maximum number of MSI-X vectors the device can allocate to any one VF */
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_OFST 24
+#define       MC_CMD_GET_PCIE_RESOURCE_INFO_OUT_MAX_VF_VECTORS_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_MODES
+ * Find out about available port modes
+ */
+#define MC_CMD_GET_PORT_MODES 0xff
+#undef MC_CMD_0xff_PRIVILEGE_CTG
+
+#define MC_CMD_0xff_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_PORT_MODES_IN msgrequest */
+#define    MC_CMD_GET_PORT_MODES_IN_LEN 0
+
+/* MC_CMD_GET_PORT_MODES_OUT msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_LEN 12
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_MODES_LEN 4
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_LEN 4
+
+/* MC_CMD_GET_PORT_MODES_OUT_V2 msgresponse */
+#define    MC_CMD_GET_PORT_MODES_OUT_V2_LEN 16
+/* Bitmask of port modes available on the board (indexed by TLV_PORT_MODE_*)
+ * that are supported for customer use in production firmware.
+ */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_MODES_OFST 0
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_MODES_LEN 4
+/* Default (canonical) board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_OFST 4
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_DEFAULT_MODE_LEN 4
+/* Current board mode */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_OFST 8
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_CURRENT_MODE_LEN 4
+/* Bitmask of engineering port modes available on the board (indexed by
+ * TLV_PORT_MODE_*). A superset of MC_CMD_GET_PORT_MODES_OUT/MODES that
+ * contains all modes implemented in firmware for a particular board. Modes
+ * listed in MODES are considered production modes and should be exposed in
+ * userland tools. Modes listed in ENGINEERING_MODES, but not in MODES,
+ * should be considered hidden (not to be exposed in userland tools) and for
+ * engineering use only. There are no other semantic differences and any mode
+ * listed in either MODES or ENGINEERING_MODES can be set on the board.
+ */
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_OFST 12
+#define       MC_CMD_GET_PORT_MODES_OUT_V2_ENGINEERING_MODES_LEN 4
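+/* Illustrative sketch for MC_CMD_GET_PORT_MODES, not part of the generated
+ * definitions: read the V2 response with the sfc MCDI helpers from mcdi.h and
+ * derive the engineering-only modes that userland tools should hide (bits set
+ * in ENGINEERING_MODES but not in MODES). "efx" and "rc" are placeholders.
+ *
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_MODES_OUT_V2_LEN);
+ *      u32 modes, eng_only;
+ *      size_t outlen;
+ *
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_MODES, NULL, 0,
+ *                        outbuf, sizeof(outbuf), &outlen);
+ *      if (!rc && outlen >= MC_CMD_GET_PORT_MODES_OUT_V2_LEN) {
+ *              modes = MCDI_DWORD(outbuf, GET_PORT_MODES_OUT_V2_MODES);
+ *              eng_only = MCDI_DWORD(outbuf,
+ *                              GET_PORT_MODES_OUT_V2_ENGINEERING_MODES) & ~modes;
+ *      }
+ */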
+
+
+/***********************************/
+/* MC_CMD_OVERRIDE_PORT_MODE
+ * Override flash config port mode for subsequent MC reboot(s). Override data
+ * is stored in the persistent data section of DMEM and activated on next MC
+ * warm reboot. A cold reboot resets the override. It is assumed that a
+ * sufficient number of PFs are available and that port mapping is valid for
+ * the new port mode, as the override does not affect PF configuration.
+ */
+#define MC_CMD_OVERRIDE_PORT_MODE 0x137
+#undef MC_CMD_0x137_PRIVILEGE_CTG
+
+#define MC_CMD_0x137_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_OVERRIDE_PORT_MODE_IN msgrequest */
+#define    MC_CMD_OVERRIDE_PORT_MODE_IN_LEN 8
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_OFST 0
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_FLAGS_LEN 4
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_OFST 0
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_LBN 0
+#define        MC_CMD_OVERRIDE_PORT_MODE_IN_ENABLE_WIDTH 1
+/* New mode (TLV_PORT_MODE_*) to set, if override enabled */
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_OFST 4
+#define       MC_CMD_OVERRIDE_PORT_MODE_IN_MODE_LEN 4
+
+/* MC_CMD_OVERRIDE_PORT_MODE_OUT msgresponse */
+#define    MC_CMD_OVERRIDE_PORT_MODE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_READ_ATB
+ * Sample voltages on the ATB
+ */
+#define MC_CMD_READ_ATB 0x100
+#undef MC_CMD_0x100_PRIVILEGE_CTG
+
+#define MC_CMD_0x100_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_READ_ATB_IN msgrequest */
+#define    MC_CMD_READ_ATB_IN_LEN 16
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_OFST 0
+#define       MC_CMD_READ_ATB_IN_SIGNAL_BUS_LEN 4
+#define          MC_CMD_READ_ATB_IN_BUS_CCOM 0x0 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CKR 0x1 /* enum */
+#define          MC_CMD_READ_ATB_IN_BUS_CPCIE 0x8 /* enum */
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_OFST 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_EN_BITNO_LEN 4
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_OFST 8
+#define       MC_CMD_READ_ATB_IN_SIGNAL_SEL_LEN 4
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_OFST 12
+#define       MC_CMD_READ_ATB_IN_SETTLING_TIME_US_LEN 4
+
+/* MC_CMD_READ_ATB_OUT msgresponse */
+#define    MC_CMD_READ_ATB_OUT_LEN 4
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_OFST 0
+#define       MC_CMD_READ_ATB_OUT_SAMPLE_MV_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_WORKAROUNDS
+ * Read the list of all implemented and all currently enabled workarounds. The
+ * enums here must correspond with those in MC_CMD_WORKAROUND.
+ */
+#define MC_CMD_GET_WORKAROUNDS 0x59
+#undef MC_CMD_0x59_PRIVILEGE_CTG
+
+#define MC_CMD_0x59_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_WORKAROUNDS_OUT msgresponse */
+#define    MC_CMD_GET_WORKAROUNDS_OUT_LEN 8
+/* Each workaround is represented by a single bit according to the enums below.
+ */
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_OFST 0
+#define       MC_CMD_GET_WORKAROUNDS_OUT_IMPLEMENTED_LEN 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_OFST 4
+#define       MC_CMD_GET_WORKAROUNDS_OUT_ENABLED_LEN 4
+/* enum: Bug 17230 work around. */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG17230 0x2
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35388 0x4
+/* enum: Bug35017 workaround (A64 tables must be identity map) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG35017 0x8
+/* enum: Bug 41750 present (MC_CMD_TRIGGER_INTERRUPT won't work) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG41750 0x10
+/* enum: Bug 42008 present (Interrupts can overtake associated events). Caution
+ * - before adding code that queries this workaround, remember that there's
+ * released Monza firmware that doesn't understand MC_CMD_WORKAROUND_BUG42008,
+ * and will hence (incorrectly) report that the bug doesn't exist.
+ */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG42008 0x20
+/* enum: Bug 26807 features present in firmware (multicast filter chaining) */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 0x40
+/* enum: Bug 61265 work around (broken EVQ TMR writes). */
+#define          MC_CMD_GET_WORKAROUNDS_OUT_BUG61265 0x80
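+/* Illustrative sketch for MC_CMD_GET_WORKAROUNDS, not part of the generated
+ * definitions: check whether the bug35388 workaround is both implemented and
+ * enabled, using the sfc MCDI helpers from mcdi.h. "efx", "rc" and "bug35388"
+ * are placeholders.
+ *
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_WORKAROUNDS_OUT_LEN);
+ *      u32 impl, enabled;
+ *
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_GET_WORKAROUNDS, NULL, 0,
+ *                        outbuf, sizeof(outbuf), NULL);
+ *      if (!rc) {
+ *              impl = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_IMPLEMENTED);
+ *              enabled = MCDI_DWORD(outbuf, GET_WORKAROUNDS_OUT_ENABLED);
+ *              bug35388 = !!(impl & enabled &
+ *                            MC_CMD_GET_WORKAROUNDS_OUT_BUG35388);
+ *      }
+ */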
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MASK
+ * Read/set privileges of an arbitrary PCIe function
+ */
+#define MC_CMD_PRIVILEGE_MASK 0x5a
+#undef MC_CMD_0x5a_PRIVILEGE_CTG
+
+#define MC_CMD_0x5a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_PRIVILEGE_MASK_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MASK_IN_LEN 8
+/* The target function to have its mask read or set, e.g. PF 0 = 0xFFFF0000, VF
+ * 1,3 = 0x00030001
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_LEN 4
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_OFST 0
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MASK_IN_FUNCTION_VF_WIDTH 16
+#define          MC_CMD_PRIVILEGE_MASK_IN_VF_NULL 0xffff /* enum */
+/* New privilege mask to be set. The mask will only be changed if the MSB is
+ * set to 1.
+ */
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_OFST 4
+#define       MC_CMD_PRIVILEGE_MASK_IN_NEW_MASK_LEN 4
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN 0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK 0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
+/* enum: Privilege for insecure commands. Commands that belong to this group
+ * are not permitted on secure adapters regardless of the privilege mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE 0x4000
+/* enum: Trusted Server Adapter (TSA) / ServerLock. Privilege for
+ * administrator-level operations that are not allowed from the local host once
+ * an adapter has Bound to a remote ServerLock Controller (see doxbox
+ * SF-117064-DG for background).
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN_TSA_UNBOUND 0x8000
+/* enum: Set this bit to indicate that a new privilege mask is to be set,
+ * otherwise the command will only read the existing mask.
+ */
+#define          MC_CMD_PRIVILEGE_MASK_IN_DO_CHANGE 0x80000000
+
+/* MC_CMD_PRIVILEGE_MASK_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MASK_OUT_LEN 4
+/* For an admin function, all privileges are always reported. */
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_OFST 0
+#define       MC_CMD_PRIVILEGE_MASK_OUT_OLD_MASK_LEN 4
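+/* Illustrative sketch for MC_CMD_PRIVILEGE_MASK, not part of the generated
+ * definitions: read the privilege mask of PF 1 / VF 3 (FUNCTION encodes as
+ * 0x00030001 per the PF/VF sub-fields above) without changing it, by leaving
+ * the DO_CHANGE bit of NEW_MASK clear. Uses the sfc MCDI helpers from mcdi.h;
+ * "efx" and "rc" are placeholders.
+ *
+ *      MCDI_DECLARE_BUF(inbuf, MC_CMD_PRIVILEGE_MASK_IN_LEN);
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_PRIVILEGE_MASK_OUT_LEN);
+ *      u32 old_mask;
+ *
+ *      MCDI_POPULATE_DWORD_2(inbuf, PRIVILEGE_MASK_IN_FUNCTION,
+ *                            PRIVILEGE_MASK_IN_FUNCTION_PF, 1,
+ *                            PRIVILEGE_MASK_IN_FUNCTION_VF, 3);
+ *      MCDI_SET_DWORD(inbuf, PRIVILEGE_MASK_IN_NEW_MASK, 0);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_PRIVILEGE_MASK, inbuf, sizeof(inbuf),
+ *                        outbuf, sizeof(outbuf), NULL);
+ *      old_mask = MCDI_DWORD(outbuf, PRIVILEGE_MASK_OUT_OLD_MASK);
+ */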
+
+
+/***********************************/
+/* MC_CMD_LINK_STATE_MODE
+ * Read/set link state mode of a VF
+ */
+#define MC_CMD_LINK_STATE_MODE 0x5c
+#undef MC_CMD_0x5c_PRIVILEGE_CTG
+
+#define MC_CMD_0x5c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LINK_STATE_MODE_IN msgrequest */
+#define    MC_CMD_LINK_STATE_MODE_IN_LEN 8
+/* The target function to have its link state mode read or set, must be a VF
+ * e.g. VF 1,3 = 0x00030001
+ */
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_IN_FUNCTION_LEN 4
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_OFST 0
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_LINK_STATE_MODE_IN_FUNCTION_VF_WIDTH 16
+/* New link state mode to be set */
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_OFST 4
+#define       MC_CMD_LINK_STATE_MODE_IN_NEW_MODE_LEN 4
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_AUTO 0x0 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_UP 0x1 /* enum */
+#define          MC_CMD_LINK_STATE_MODE_IN_LINK_STATE_DOWN 0x2 /* enum */
+/* enum: Use this value to just read the existing setting without modifying it.
+ */
+#define          MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE 0xffffffff
+
+/* MC_CMD_LINK_STATE_MODE_OUT msgresponse */
+#define    MC_CMD_LINK_STATE_MODE_OUT_LEN 4
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_OFST 0
+#define       MC_CMD_LINK_STATE_MODE_OUT_OLD_MODE_LEN 4
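+/* Illustrative sketch for MC_CMD_LINK_STATE_MODE, not part of the generated
+ * definitions: query a VF's current link state mode without modifying it by
+ * passing LINK_STATE_MODE_IN_DO_NOT_CHANGE as the new mode. Uses the sfc MCDI
+ * helpers from mcdi.h; "efx", "rc", "pf_index" and "vf_index" are
+ * placeholders.
+ *
+ *      MCDI_DECLARE_BUF(inbuf, MC_CMD_LINK_STATE_MODE_IN_LEN);
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_LINK_STATE_MODE_OUT_LEN);
+ *      u32 old_mode;
+ *
+ *      MCDI_POPULATE_DWORD_2(inbuf, LINK_STATE_MODE_IN_FUNCTION,
+ *                            LINK_STATE_MODE_IN_FUNCTION_PF, pf_index,
+ *                            LINK_STATE_MODE_IN_FUNCTION_VF, vf_index);
+ *      MCDI_SET_DWORD(inbuf, LINK_STATE_MODE_IN_NEW_MODE,
+ *                     MC_CMD_LINK_STATE_MODE_IN_DO_NOT_CHANGE);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_LINK_STATE_MODE, inbuf, sizeof(inbuf),
+ *                        outbuf, sizeof(outbuf), NULL);
+ *      old_mode = MCDI_DWORD(outbuf, LINK_STATE_MODE_OUT_OLD_MODE);
+ */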
+
+
+/***********************************/
+/* MC_CMD_GET_SNAPSHOT_LENGTH
+ * Obtain the current range of allowable values for the SNAPSHOT_LENGTH
+ * parameter to MC_CMD_INIT_RXQ.
+ */
+#define MC_CMD_GET_SNAPSHOT_LENGTH 0x101
+#undef MC_CMD_0x101_PRIVILEGE_CTG
+
+#define MC_CMD_0x101_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_IN msgrequest */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_IN_LEN 0
+
+/* MC_CMD_GET_SNAPSHOT_LENGTH_OUT msgresponse */
+#define    MC_CMD_GET_SNAPSHOT_LENGTH_OUT_LEN 8
+/* Minimum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_OFST 0
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MIN_LEN 4
+/* Maximum acceptable snapshot length. */
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_OFST 4
+#define       MC_CMD_GET_SNAPSHOT_LENGTH_OUT_RX_SNAPLEN_MAX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_FUSE_DIAGS
+ * Additional fuse diagnostics
+ */
+#define MC_CMD_FUSE_DIAGS 0x102
+#undef MC_CMD_0x102_PRIVILEGE_CTG
+
+#define MC_CMD_0x102_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_FUSE_DIAGS_IN msgrequest */
+#define    MC_CMD_FUSE_DIAGS_IN_LEN 0
+
+/* MC_CMD_FUSE_DIAGS_OUT msgresponse */
+#define    MC_CMD_FUSE_DIAGS_OUT_LEN 48
+/* Total number of mismatched bits between pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_OFST 0
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_OFST 4
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_OFST 8
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 0 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_OFST 12
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA0_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_OFST 16
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_OFST 20
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_OFST 24
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 1 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_OFST 28
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA1_CHECKSUM_LEN 4
+/* Total number of mismatched bits between pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_OFST 32
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_MISMATCH_BITS_LEN 4
+/* Total number of unexpectedly clear (set in B but not A) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_OFST 36
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_A_BAD_BITS_LEN 4
+/* Total number of unexpectedly clear (set in A but not B) bits in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_OFST 40
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_PAIR_B_BAD_BITS_LEN 4
+/* Checksum of data after logical OR of pairs in area 2 */
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_OFST 44
+#define       MC_CMD_FUSE_DIAGS_OUT_AREA2_CHECKSUM_LEN 4
+
+
+/***********************************/
+/* MC_CMD_PRIVILEGE_MODIFY
+ * Modify the privileges of a set of PCIe functions. Note that this operation
+ * only affects non-admin functions unless the admin privilege itself is
+ * included in one of the masks provided.
+ */
+#define MC_CMD_PRIVILEGE_MODIFY 0x60
+#undef MC_CMD_0x60_PRIVILEGE_CTG
+
+#define MC_CMD_0x60_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PRIVILEGE_MODIFY_IN msgrequest */
+#define    MC_CMD_PRIVILEGE_MODIFY_IN_LEN 16
+/* The groups of functions to have their privilege masks modified. */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_OFST 0
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FN_GROUP_LEN 4
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_NONE 0x0 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ALL 0x1 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_PFS_ONLY 0x2 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_ONLY 0x3 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_VFS_OF_PF 0x4 /* enum */
+#define          MC_CMD_PRIVILEGE_MODIFY_IN_ONE 0x5 /* enum */
+/* For VFS_OF_PF specify the PF, for ONE specify the target function */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_OFST 4
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_LEN 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_LBN 0
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_PF_WIDTH 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_OFST 4
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_LBN 16
+#define        MC_CMD_PRIVILEGE_MODIFY_IN_FUNCTION_VF_WIDTH 16
+/* Privileges to be added to the target functions. For privilege definitions
+ * refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_OFST 8
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_ADD_MASK_LEN 4
+/* Privileges to be removed from the target functions. For privilege
+ * definitions refer to the command MC_CMD_PRIVILEGE_MASK
+ */
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_OFST 12
+#define       MC_CMD_PRIVILEGE_MODIFY_IN_REMOVE_MASK_LEN 4
+
+/* MC_CMD_PRIVILEGE_MODIFY_OUT msgresponse */
+#define    MC_CMD_PRIVILEGE_MODIFY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_BYTES
+ * Read XPM memory
+ */
+#define MC_CMD_XPM_READ_BYTES 0x103
+#undef MC_CMD_0x103_PRIVILEGE_CTG
+
+#define MC_CMD_0x103_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_READ_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_READ_BYTES_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_READ_BYTES_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_READ_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMIN 0
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX 252
+#define    MC_CMD_XPM_READ_BYTES_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_XPM_READ_BYTES_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(len) (((len)-0)/1)
+/* Data */
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_OFST 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM 252
+#define       MC_CMD_XPM_READ_BYTES_OUT_DATA_MAXNUM_MCDI2 1020
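+/* Illustrative sketch for MC_CMD_XPM_READ_BYTES, not part of the generated
+ * definitions: read "count" bytes starting at "addr"; the number of DATA
+ * bytes actually returned follows from the response length. Uses the sfc MCDI
+ * helpers from mcdi.h; "efx", "rc", "addr", "count" and "buf" are
+ * placeholders.
+ *
+ *      MCDI_DECLARE_BUF(inbuf, MC_CMD_XPM_READ_BYTES_IN_LEN);
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_XPM_READ_BYTES_OUT_LENMAX);
+ *      size_t outlen;
+ *
+ *      MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_ADDR, addr);
+ *      MCDI_SET_DWORD(inbuf, XPM_READ_BYTES_IN_COUNT, count);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_XPM_READ_BYTES, inbuf, sizeof(inbuf),
+ *                        outbuf, sizeof(outbuf), &outlen);
+ *      if (!rc)
+ *              memcpy(buf, MCDI_PTR(outbuf, XPM_READ_BYTES_OUT_DATA),
+ *                     MC_CMD_XPM_READ_BYTES_OUT_DATA_NUM(outlen));
+ */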
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_BYTES
+ * Write XPM memory
+ */
+#define MC_CMD_XPM_WRITE_BYTES 0x104
+#undef MC_CMD_0x104_PRIVILEGE_CTG
+
+#define MC_CMD_0x104_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_BYTES_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMIN 8
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX 252
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_XPM_WRITE_BYTES_IN_LEN(num) (8+1*(num))
+#define    MC_CMD_XPM_WRITE_BYTES_IN_DATA_NUM(len) (((len)-8)/1)
+/* Start address (byte) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_WRITE_BYTES_IN_COUNT_LEN 4
+/* Data */
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_OFST 8
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM 244
+#define       MC_CMD_XPM_WRITE_BYTES_IN_DATA_MAXNUM_MCDI2 1012
+
+/* MC_CMD_XPM_WRITE_BYTES_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_BYTES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_READ_SECTOR
+ * Read XPM sector
+ */
+#define MC_CMD_XPM_READ_SECTOR 0x105
+#undef MC_CMD_0x105_PRIVILEGE_CTG
+
+#define MC_CMD_0x105_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_READ_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_READ_SECTOR_IN_LEN 8
+/* Sector index */
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_IN_INDEX_LEN 4
+/* Sector size */
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_IN_SIZE_LEN 4
+
+/* MC_CMD_XPM_READ_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMIN 4
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LENMAX_MCDI2 36
+#define    MC_CMD_XPM_READ_SECTOR_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_XPM_READ_SECTOR_OUT_DATA_NUM(len) (((len)-4)/1)
+/* Sector type */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_OFST 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_TYPE_LEN 4
+#define          MC_CMD_XPM_READ_SECTOR_OUT_BLANK 0x0 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_128 0x1 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_KEY_256 0x2 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_CRYPTO_DATA 0x3 /* enum */
+#define          MC_CMD_XPM_READ_SECTOR_OUT_INVALID 0xff /* enum */
+/* Sector data */
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_OFST 4
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_LEN 1
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MINNUM 0
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM 32
+#define       MC_CMD_XPM_READ_SECTOR_OUT_DATA_MAXNUM_MCDI2 32
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_SECTOR
+ * Write XPM sector
+ */
+#define MC_CMD_XPM_WRITE_SECTOR 0x106
+#undef MC_CMD_0x106_PRIVILEGE_CTG
+
+#define MC_CMD_0x106_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMIN 12
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LENMAX_MCDI2 44
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_LEN(num) (12+1*(num))
+#define    MC_CMD_XPM_WRITE_SECTOR_IN_DATA_NUM(len) (((len)-12)/1)
+/* If writing fails due to an uncorrectable error, try up to RETRIES following
+ * sectors (or until no more space is available). If 0, only one write attempt
+ * is made. Note that uncorrectable errors are unlikely, thanks to the XPM
+ * self-repair mechanism.
+ */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RETRIES_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_OFST 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_RESERVED_LEN 3
+/* Sector type */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* Sector size */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_LEN 4
+/* Sector data */
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_OFST 12
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_LEN 1
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MINNUM 0
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM 32
+#define       MC_CMD_XPM_WRITE_SECTOR_IN_DATA_MAXNUM_MCDI2 32
+
+/* MC_CMD_XPM_WRITE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_SECTOR_OUT_LEN 4
+/* New sector index */
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_OFST 0
+#define       MC_CMD_XPM_WRITE_SECTOR_OUT_INDEX_LEN 4
+
+
+/***********************************/
+/* MC_CMD_XPM_INVALIDATE_SECTOR
+ * Invalidate XPM sector
+ */
+#define MC_CMD_XPM_INVALIDATE_SECTOR 0x107
+#undef MC_CMD_0x107_PRIVILEGE_CTG
+
+#define MC_CMD_0x107_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_IN msgrequest */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_IN_LEN 4
+/* Sector index */
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_OFST 0
+#define       MC_CMD_XPM_INVALIDATE_SECTOR_IN_INDEX_LEN 4
+
+/* MC_CMD_XPM_INVALIDATE_SECTOR_OUT msgresponse */
+#define    MC_CMD_XPM_INVALIDATE_SECTOR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_BLANK_CHECK
+ * Blank-check XPM memory and report bad locations
+ */
+#define MC_CMD_XPM_BLANK_CHECK 0x108
+#undef MC_CMD_0x108_PRIVILEGE_CTG
+
+#define MC_CMD_0x108_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_BLANK_CHECK_IN msgrequest */
+#define    MC_CMD_XPM_BLANK_CHECK_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_BLANK_CHECK_OUT msgresponse */
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMIN 4
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX 252
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_LEN(num) (4+2*(num))
+#define    MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_NUM(len) (((len)-4)/2)
+/* Total number of bad (non-blank) locations */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_OFST 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_COUNT_LEN 4
+/* Addresses of bad locations (may be fewer than BAD_COUNT if they cannot all
+ * fit into the MCDI response)
+ */
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_OFST 4
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_LEN 2
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MINNUM 0
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM 124
+#define       MC_CMD_XPM_BLANK_CHECK_OUT_BAD_ADDR_MAXNUM_MCDI2 508
+
+
+/***********************************/
+/* MC_CMD_XPM_REPAIR
+ * Blank-check and repair XPM memory
+ */
+#define MC_CMD_XPM_REPAIR 0x109
+#undef MC_CMD_0x109_PRIVILEGE_CTG
+
+#define MC_CMD_0x109_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_REPAIR_IN msgrequest */
+#define    MC_CMD_XPM_REPAIR_IN_LEN 8
+/* Start address (byte) */
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_OFST 0
+#define       MC_CMD_XPM_REPAIR_IN_ADDR_LEN 4
+/* Count (bytes) */
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_OFST 4
+#define       MC_CMD_XPM_REPAIR_IN_COUNT_LEN 4
+
+/* MC_CMD_XPM_REPAIR_OUT msgresponse */
+#define    MC_CMD_XPM_REPAIR_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_DECODER_TEST
+ * Test XPM memory address decoders for gross manufacturing defects. Can only
+ * be performed on an unprogrammed part.
+ */
+#define MC_CMD_XPM_DECODER_TEST 0x10a
+#undef MC_CMD_0x10a_PRIVILEGE_CTG
+
+#define MC_CMD_0x10a_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_DECODER_TEST_IN msgrequest */
+#define    MC_CMD_XPM_DECODER_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_DECODER_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_DECODER_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_WRITE_TEST
+ * XPM memory write test. Test XPM write logic for gross manufacturing defects
+ * by writing to a dedicated test row. There are 16 locations in the test row
+ * and the test can only be performed on locations that have not been
+ * previously used (i.e. can be run at most 16 times). The test will pick the
+ * first available location to use, or fail with ENOSPC if none left.
+ */
+#define MC_CMD_XPM_WRITE_TEST 0x10b
+#undef MC_CMD_0x10b_PRIVILEGE_CTG
+
+#define MC_CMD_0x10b_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_XPM_WRITE_TEST_IN msgrequest */
+#define    MC_CMD_XPM_WRITE_TEST_IN_LEN 0
+
+/* MC_CMD_XPM_WRITE_TEST_OUT msgresponse */
+#define    MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match, otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+#undef MC_CMD_0x10c_PRIVILEGE_CTG
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define    MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+#define       MC_CMD_EXEC_SIGNED_IN_CODELEN_LEN 4
+/* the length of data to include in the CMAC */
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+#define       MC_CMD_EXEC_SIGNED_IN_DATALEN_LEN 4
+/* the XPM sector containing the key to use */
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+#define       MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_LEN 4
+/* the expected CMAC value */
+#define       MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define    MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+#undef MC_CMD_0x10d_PRIVILEGE_CTG
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define    MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+#define       MC_CMD_PREPARE_SIGNED_IN_DATALEN_LEN 4
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define    MC_CMD_PREPARE_SIGNED_OUT_LEN 0
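+/* Illustrative sketch of the two-step flow described above, not part of the
+ * generated definitions: scrub the data region with MC_CMD_PREPARE_SIGNED,
+ * upload the signed image, then issue MC_CMD_EXEC_SIGNED with the expected
+ * CMAC. Uses the sfc MCDI helpers from mcdi.h; "efx", "rc", "codelen",
+ * "datalen", "keysector" and "cmac" are placeholders.
+ *
+ *      MCDI_DECLARE_BUF(prep, MC_CMD_PREPARE_SIGNED_IN_LEN);
+ *      MCDI_DECLARE_BUF(exec, MC_CMD_EXEC_SIGNED_IN_LEN);
+ *
+ *      MCDI_SET_DWORD(prep, PREPARE_SIGNED_IN_DATALEN, datalen);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED, prep, sizeof(prep),
+ *                        NULL, 0, NULL);
+ *
+ *      ... upload the signed image into IMEM/DMEM here ...
+ *
+ *      MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_CODELEN, codelen);
+ *      MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_DATALEN, datalen);
+ *      MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_KEYSECTOR, keysector);
+ *      memcpy(MCDI_PTR(exec, EXEC_SIGNED_IN_CMAC), cmac,
+ *             MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED, exec, sizeof(exec),
+ *                        NULL, 0, NULL);
+ */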
+
+
+/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
+#define    TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
+/* UDP port (the standard ports are named below but any port may be used) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
+/* enum: the IANA allocated UDP port for VXLAN */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
+/* enum: the IANA allocated UDP port for Geneve */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
+/* tunnel encapsulation protocol (only those named below are supported) */
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
+/* enum: This port will be used for VXLAN on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
+/* enum: This port will be used for Geneve on both IPv4 and IPv6 */
+#define          TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
+#define       TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+#undef MC_CMD_0x117_PRIVILEGE_CTG
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX_MCDI2 68
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_NUM(len) (((len)-4)/4)
+/* Flags */
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_OFST 0
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM_MCDI2 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define       MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_OFST 0
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define        MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
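+/* Illustrative sketch for MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, not part of the
+ * generated definitions: program a single VXLAN entry on the IANA port using
+ * the sfc MCDI and bitfield helpers (mcdi.h, bitfield.h). The 16-bit
+ * NUM_ENTRIES field is written through MCDI_PTR since the dword helpers do
+ * not cover it; the RESETTING flag in the response (see above) can then be
+ * checked. "efx" and "rc" are placeholders.
+ *
+ *      MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(1));
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
+ *      efx_dword_t entry;
+ *
+ *      MCDI_POPULATE_DWORD_1(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS,
+ *                            SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING, 0);
+ *      put_unaligned_le16(1, MCDI_PTR(inbuf,
+ *                            SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES));
+ *      EFX_POPULATE_DWORD_2(entry,
+ *                           TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT,
+ *                           TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT,
+ *                           TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL,
+ *                           TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN);
+ *      memcpy(MCDI_PTR(inbuf, SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES), &entry,
+ *             sizeof(entry));
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS, inbuf,
+ *                        sizeof(inbuf), outbuf, sizeof(outbuf), NULL);
+ */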
+
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed based on a table giving the destination vFIFO. The
+ * table index is a hash of the IPv4 source and destination and the VLAN
+ * priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+#undef MC_CMD_0x118_PRIVILEGE_CTG
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define    MC_CMD_RX_BALANCING_IN_LEN 16
+/* The RX port whose upconverter table will be modified */
+#define       MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define       MC_CMD_RX_BALANCING_IN_PORT_LEN 4
+/* The VLAN priority associated with the table index and vFIFO */
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 4
+#define       MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 4
+/* The resulting bit of SRC^DST for indexing the table */
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 8
+#define       MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 4
+/* The RX engine to which the vFIFO in the table entry will point */
+#define       MC_CMD_RX_BALANCING_IN_ENG_OFST 12
+#define       MC_CMD_RX_BALANCING_IN_ENG_LEN 4
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define    MC_CMD_RX_BALANCING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_NVRAM_PRIVATE_APPEND
+ * Append a single TLV to the MC_USAGE_TLV partition. Returns MC_CMD_ERR_EEXIST
+ * if the tag is already present.
+ */
+#define MC_CMD_NVRAM_PRIVATE_APPEND 0x11c
+#undef MC_CMD_0x11c_PRIVILEGE_CTG
+
+#define MC_CMD_0x11c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_IN msgrequest */
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMIN 9
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX 252
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_LEN(num) (8+1*(num))
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_NUM(len) (((len)-8)/1)
+/* The tag to be appended */
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_OFST 0
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_TAG_LEN 4
+/* The length of the data */
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_OFST 4
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_LENGTH_LEN 4
+/* The data to be contained in the TLV structure */
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_OFST 8
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_LEN 1
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MINNUM 1
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM 244
+#define       MC_CMD_NVRAM_PRIVATE_APPEND_IN_DATA_BUFFER_MAXNUM_MCDI2 1012
+
+/* MC_CMD_NVRAM_PRIVATE_APPEND_OUT msgresponse */
+#define    MC_CMD_NVRAM_PRIVATE_APPEND_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_XPM_VERIFY_CONTENTS
+ * Verify that the contents of the XPM memory are correct (Medford only). This
+ * is used during manufacture to check that the XPM memory has been programmed
+ * correctly at ATE.
+ */
+#define MC_CMD_XPM_VERIFY_CONTENTS 0x11b
+#undef MC_CMD_0x11b_PRIVILEGE_CTG
+
+#define MC_CMD_0x11b_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_IN msgrequest */
+#define    MC_CMD_XPM_VERIFY_CONTENTS_IN_LEN 4
+/* Data type to be checked */
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_IN_DATA_TYPE_LEN 4
+
+/* MC_CMD_XPM_VERIFY_CONTENTS_OUT msgresponse */
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMIN 12
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX 252
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_LEN(num) (12+1*(num))
+#define    MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_NUM(len) (((len)-12)/1)
+/* Number of sectors found (test builds only) */
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_OFST 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_SECTORS_LEN 4
+/* Number of bytes found (test builds only) */
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_OFST 4
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_NUM_BYTES_LEN 4
+/* Length of signature */
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_OFST 8
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIG_LENGTH_LEN 4
+/* Signature */
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_OFST 12
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_LEN 1
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MINNUM 0
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM 240
+#define       MC_CMD_XPM_VERIFY_CONTENTS_OUT_SIGNATURE_MAXNUM_MCDI2 1008
+
+
+/***********************************/
+/* MC_CMD_SET_EVQ_TMR
+ * Update the timer load, timer reload and timer mode values for a given EVQ.
+ * The requested timer values (in TMR_LOAD_REQ_NS and TMR_RELOAD_REQ_NS) will
+ * be rounded up to the granularity supported by the hardware, then truncated
+ * to the range supported by the hardware. The resulting value after the
+ * rounding and truncation will be returned to the caller (in TMR_LOAD_ACT_NS
+ * and TMR_RELOAD_ACT_NS).
+ */
+#define MC_CMD_SET_EVQ_TMR 0x120
+#undef MC_CMD_0x120_PRIVILEGE_CTG
+
+#define MC_CMD_0x120_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SET_EVQ_TMR_IN msgrequest */
+#define    MC_CMD_SET_EVQ_TMR_IN_LEN 16
+/* Function-relative queue instance */
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_IN_INSTANCE_LEN 4
+/* Requested value for timer load (in nanoseconds) */
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS_LEN 4
+/* Requested value for timer reload (in nanoseconds) */
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_OFST 8
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS_LEN 4
+/* Timer mode. Meanings as per EVQ_TMR_REG.TC_TIMER_VAL */
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_OFST 12
+#define       MC_CMD_SET_EVQ_TMR_IN_TMR_MODE_LEN 4
+#define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS 0x0 /* enum */
+#define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START 0x1 /* enum */
+#define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START 0x2 /* enum */
+#define          MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF 0x3 /* enum */
+
+/* MC_CMD_SET_EVQ_TMR_OUT msgresponse */
+#define    MC_CMD_SET_EVQ_TMR_OUT_LEN 8
+/* Actual value for timer load (in nanoseconds) */
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_OFST 0
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS_LEN 4
+/* Actual value for timer reload (in nanoseconds) */
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_OFST 4
+#define       MC_CMD_SET_EVQ_TMR_OUT_TMR_RELOAD_ACT_NS_LEN 4
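+/* Illustrative sketch for MC_CMD_SET_EVQ_TMR, not part of the generated
+ * definitions: request a moderation interval in nanoseconds and read back the
+ * value actually programmed after the firmware's rounding and truncation.
+ * Uses the sfc MCDI helpers from mcdi.h; "efx", "rc", "channel" and "ns" are
+ * placeholders.
+ *
+ *      MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_EVQ_TMR_IN_LEN);
+ *      MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_EVQ_TMR_OUT_LEN);
+ *      u32 actual_ns;
+ *
+ *      MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_INSTANCE, channel);
+ *      MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, ns);
+ *      MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, ns);
+ *      MCDI_SET_DWORD(inbuf, SET_EVQ_TMR_IN_TMR_MODE,
+ *                     MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
+ *      rc = efx_mcdi_rpc(efx, MC_CMD_SET_EVQ_TMR, inbuf, sizeof(inbuf),
+ *                        outbuf, sizeof(outbuf), NULL);
+ *      actual_ns = MCDI_DWORD(outbuf, SET_EVQ_TMR_OUT_TMR_LOAD_ACT_NS);
+ */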
+
+
+/***********************************/
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES
+ * Query properties about the event queue timers.
+ */
+#define MC_CMD_GET_EVQ_TMR_PROPERTIES 0x122
+#undef MC_CMD_0x122_PRIVILEGE_CTG
+
+#define MC_CMD_0x122_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_IN msgrequest */
+#define    MC_CMD_GET_EVQ_TMR_PROPERTIES_IN_LEN 0
+
+/* MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT msgresponse */
+#define    MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_LEN 36
+/* Reserved for future use. */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_OFST 0
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_FLAGS_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the time interval (in
+ * nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_OFST 4
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_NS_PER_COUNT_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, this is the maximum value
+ * allowed for timer load/reload counts.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_OFST 8
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_MAX_COUNT_LEN 4
+/* For timers updated via writes to EVQ_TMR_REG, timer load/reload counts not a
+ * multiple of this step size will be rounded in an implementation defined
+ * manner.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_OFST 12
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_TMR_REG_STEP_LEN 4
+/* Maximum timer duration (in nanoseconds) for timers updated via MCDI. Only
+ * meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_OFST 16
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_MAX_NS_LEN 4
+/* Timer durations requested via MCDI that are not a multiple of this step size
+ * will be rounded up. Only meaningful if MC_CMD_SET_EVQ_TMR is implemented.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_OFST 20
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_MCDI_TMR_STEP_NS_LEN 4
+/* For timers updated using the bug35388 workaround, this is the time interval
+ * (in nanoseconds) for each increment of the timer load/reload count. The
+ * requested duration of a timer is this value multiplied by the timer
+ * load/reload count. This field is only meaningful if the bug35388 workaround
+ * is enabled.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_OFST 24
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_NS_PER_COUNT_LEN 4
+/* For timers updated using the bug35388 workaround, this is the maximum value
+ * allowed for timer load/reload counts. This field is only meaningful if the
+ * bug35388 workaround is enabled.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_OFST 28
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_MAX_COUNT_LEN 4
+/* For timers updated using the bug35388 workaround, timer load/reload counts
+ * not a multiple of this step size will be rounded in an implementation
+ * defined manner. This field is only meaningful if the bug35388 workaround is
+ * enabled.
+ */
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_OFST 32
+#define       MC_CMD_GET_EVQ_TMR_PROPERTIES_OUT_BUG35388_TMR_STEP_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP
+ * When we use the TX_vFIFO_ULL mode, we can allocate common pools using the
+ * non used switch buffers.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_CP 0x11d
+#undef MC_CMD_0x11d_PRIVILEGE_CTG
+
+#define MC_CMD_0x11d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define    MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_LEN 20
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INSTANCE_LEN 4
+/* Will the common pool be used as TX_vFIFO_ULL (1) */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_MODE_LEN 4
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_ENABLED 0x1 /* enum */
+/* enum: Using this interface without TX_vFIFO_ULL is not supported for now */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_DISABLED 0x0
+/* Number of buffers to reserve for the common pool */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_SIZE_LEN 4
+/* TX datapath to which the Common Pool is connected. */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_INGRESS_LEN 4
+/* enum: Extracts information from function */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1
+/* Network port or RX Engine to which the common pool connects. */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_EGRESS_LEN 4
+/* enum: Extracts information from function */
+/*               MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_USE_FUNCTION_VALUE -0x1 */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT0 0x0 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT1 0x1 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT2 0x2 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_CP_IN_RX_ENGINE1 0x5
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define    MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_LEN 4
+/* ID of the common pool allocated */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_CP_OUT_CP_ID_LEN 4
+
+
+/***********************************/
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO
+ * When we use the TX_vFIFO_ULL mode, we can allocate vFIFOs using the
+ * previously allocated common pools.
+ */
+#define MC_CMD_ALLOCATE_TX_VFIFO_VFIFO 0x11e
+#undef MC_CMD_0x11e_PRIVILEGE_CTG
+
+#define MC_CMD_0x11e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN msgrequest */
+#define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LEN 20
+/* Previously allocated common pool with which the new vFIFO will be
+ * associated.
+ */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_CP_LEN 4
+/* Port or RX engine with which to associate the vFIFO egress */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_EGRESS_LEN 4
+/* enum: Extracts information from common pool */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_USE_CP_VALUE -0x1
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT0 0x0 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT1 0x1 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT2 0x2 /* enum */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PORT3 0x3 /* enum */
+/* enum: To enable Switch loopback with Rx engine 0 */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE0 0x4
+/* enum: To enable Switch loopback with Rx engine 1 */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_RX_ENGINE1 0x5
+/* Minimum number of buffers that the pool must have */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_OFST 8
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_SIZE_LEN 4
+/* enum: Do not check the space available */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_NO_MINIMUM 0x0
+/* Will the vFIFO be used as TX_vFIFO_ULL */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_OFST 12
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_MODE_LEN 4
+/* Network priority of the vFIFO, if applicable */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_OFST 16
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_PRIORITY_LEN 4
+/* enum: Search for the lowest unused priority */
+#define          MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_IN_LOWEST_AVAILABLE -0x1
+
+/* MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT msgresponse */
+#define    MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_LEN 8
+/* Short vFIFO ID */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_OFST 0
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_VID_LEN 4
+/* Network priority of the vFIFO */
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_OFST 4
+#define       MC_CMD_ALLOCATE_TX_VFIFO_VFIFO_OUT_PRIORITY_LEN 4
+
+
+/***********************************/
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF
+ * This interface clears the configuration of the given vFIFO and leaves it
+ * ready to be re-used.
+ */
+#define MC_CMD_TEARDOWN_TX_VFIFO_VF 0x11f
+#undef MC_CMD_0x11f_PRIVILEGE_CTG
+
+#define MC_CMD_0x11f_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_IN msgrequest */
+#define    MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_LEN 4
+/* Short vFIFO ID */
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_OFST 0
+#define       MC_CMD_TEARDOWN_TX_VFIFO_VF_IN_VFIFO_LEN 4
+
+/* MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT msgresponse */
+#define    MC_CMD_TEARDOWN_TX_VFIFO_VF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP
+ * This interface clears the configuration of the given common pool and leaves
+ * it ready to be re-used.
+ */
+#define MC_CMD_DEALLOCATE_TX_VFIFO_CP 0x121
+#undef MC_CMD_0x121_PRIVILEGE_CTG
+
+#define MC_CMD_0x121_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN msgrequest */
+#define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_LEN 4
+/* Common pool ID given when pool allocated */
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_OFST 0
+#define       MC_CMD_DEALLOCATE_TX_VFIFO_CP_IN_POOL_ID_LEN 4
+
+/* MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT msgresponse */
+#define    MC_CMD_DEALLOCATE_TX_VFIFO_CP_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS
+ * This interface allows the host to find out how many common pool buffers are
+ * not yet assigned.
+ */
+#define MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS 0x124
+#undef MC_CMD_0x124_PRIVILEGE_CTG
+
+#define MC_CMD_0x124_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN msgrequest */
+#define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_IN_LEN 0
+
+/* MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT msgresponse */
+#define    MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_LEN 8
+/* Available buffers for the ENG to NET vFIFOs. */
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_OFST 0
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_NET_LEN 4
+/* Available buffers for the ENG to ENG and NET to ENG vFIFOs. */
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_OFST 4
+#define       MC_CMD_SWITCH_GET_UNASSIGNED_BUFFERS_OUT_ENG_LEN 4
+
+
+/***********************************/
+/* MC_CMD_SUC_VERSION
+ * Get the version of the SUC
+ */
+#define MC_CMD_SUC_VERSION 0x134
+#undef MC_CMD_0x134_PRIVILEGE_CTG
+
+#define MC_CMD_0x134_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_SUC_VERSION_IN msgrequest */
+#define    MC_CMD_SUC_VERSION_IN_LEN 0
+
+/* MC_CMD_SUC_VERSION_OUT msgresponse */
+#define    MC_CMD_SUC_VERSION_OUT_LEN 24
+/* The SUC firmware version as four numbers - a.b.c.d */
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_OFST 0
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_LEN 4
+#define       MC_CMD_SUC_VERSION_OUT_VERSION_NUM 4
+/* The date, in seconds since the Unix epoch, when the firmware image was
+ * built.
+ */
+#define       MC_CMD_SUC_VERSION_OUT_BUILD_DATE_OFST 16
+#define       MC_CMD_SUC_VERSION_OUT_BUILD_DATE_LEN 4
+/* The ID of the SUC chip. This is specific to the platform but typically
+ * indicates family, memory sizes etc. See SF-116728-SW for further details.
+ */
+#define       MC_CMD_SUC_VERSION_OUT_CHIP_ID_OFST 20
+#define       MC_CMD_SUC_VERSION_OUT_CHIP_ID_LEN 4
+
+/* MC_CMD_SUC_BOOT_VERSION_IN msgrequest: Get the version of the SUC boot
+ * loader.
+ */
+#define    MC_CMD_SUC_BOOT_VERSION_IN_LEN 4
+#define       MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_OFST 0
+#define       MC_CMD_SUC_BOOT_VERSION_IN_MAGIC_LEN 4
+/* enum: Requests the SUC boot version. */
+#define          MC_CMD_SUC_VERSION_GET_BOOT_VERSION 0xb007700b
+
+/* MC_CMD_SUC_BOOT_VERSION_OUT msgresponse */
+#define    MC_CMD_SUC_BOOT_VERSION_OUT_LEN 4
+/* The SUC boot version */
+#define       MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_OFST 0
+#define       MC_CMD_SUC_BOOT_VERSION_OUT_VERSION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_GET_RX_PREFIX_ID
+ * This command is part of the mechanism for configuring the format of the RX
+ * packet prefix. It takes as input a bitmask of the fields the host would like
+ * to be in the prefix. If the hardware supports RX prefixes with that
+ * combination of fields, then this command returns a list of prefix-ids,
+ * opaque identifiers suitable for use in the RX_PREFIX_ID field of an
+ * MC_CMD_INIT_RXQ_V5_IN message. If the combination of fields is not
+ * supported, returns ENOTSUP. If the firmware can't create any new prefix-ids
+ * due to resource constraints, returns ENOSPC.
+ */
+#define MC_CMD_GET_RX_PREFIX_ID 0x13b
+#undef MC_CMD_0x13b_PRIVILEGE_CTG
+
+#define MC_CMD_0x13b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_RX_PREFIX_ID_IN msgrequest */
+#define    MC_CMD_GET_RX_PREFIX_ID_IN_LEN 8
+/* Field bitmask. */
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LEN 8
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_LO_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_IN_FIELDS_HI_OFST 4
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_LBN 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_VALID_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_LBN 2
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_FLAG_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_LBN 3
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CLASS_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_LBN 4
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_PARTIAL_TSTAMP_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN 5
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_LBN 6
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_USER_MARK_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_LBN 7
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_INGRESS_VPORT_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_LBN 8
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_CSUM_FRAME_WIDTH 1
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_OFST 0
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_LBN 9
+#define        MC_CMD_GET_RX_PREFIX_ID_IN_VLAN_STRIP_TCI_WIDTH 1
+
+/* MC_CMD_GET_RX_PREFIX_ID_OUT msgresponse */
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMIN 8
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX 252
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_LEN(num) (4+4*(num))
+#define    MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_NUM(len) (((len)-4)/4)
+/* Number of prefix-ids returned */
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_OFST 0
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS_LEN 4
+/* Opaque prefix identifiers which can be passed into MC_CMD_INIT_RXQ_V5 or
+ * MC_CMD_QUERY_PREFIX_ID
+ */
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_OFST 4
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_LEN 4
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MINNUM 1
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM 62
+#define       MC_CMD_GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID_MAXNUM_MCDI2 254
+
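+/* Usage sketch for MC_CMD_GET_RX_PREFIX_ID: request a prefix-id covering the
+ * LENGTH and RSS_HASH fields and take the first id returned. A minimal sketch
+ * assuming the sfc MCDI helpers from mcdi.h (MCDI_DECLARE_BUF, MCDI_SET_QWORD,
+ * MCDI_DWORD, efx_mcdi_rpc) and a struct efx_nic *efx, int rc and u32
+ * prefix_id in scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_RX_PREFIX_ID_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_RX_PREFIX_ID_OUT_LENMAX);
+ *    size_t outlen;
+ *
+ *    MCDI_SET_QWORD(inbuf, GET_RX_PREFIX_ID_IN_FIELDS,
+ *                   (1ULL << MC_CMD_GET_RX_PREFIX_ID_IN_LENGTH_LBN) |
+ *                   (1ULL << MC_CMD_GET_RX_PREFIX_ID_IN_RSS_HASH_LBN));
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_GET_RX_PREFIX_ID, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (!rc && MCDI_DWORD(outbuf, GET_RX_PREFIX_ID_OUT_NUM_RX_PREFIX_IDS))
+ *        prefix_id = MCDI_DWORD(outbuf, GET_RX_PREFIX_ID_OUT_RX_PREFIX_ID);
+ *
+ * The resulting prefix_id can then be placed in the RX_PREFIX_ID field of an
+ * MC_CMD_INIT_RXQ_V5_IN request.
+ */
+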
+/* RX_PREFIX_FIELD_INFO structuredef: Information about a single RX prefix
+ * field
+ */
+#define    RX_PREFIX_FIELD_INFO_LEN 4
+/* The offset of the field from the start of the prefix, in bits */
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_OFST 0
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_LEN 2
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_LBN 0
+#define       RX_PREFIX_FIELD_INFO_OFFSET_BITS_WIDTH 16
+/* The width of the field, in bits */
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_OFST 2
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_LEN 1
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_LBN 16
+#define       RX_PREFIX_FIELD_INFO_WIDTH_BITS_WIDTH 8
+/* The type of the field. These enum values are in the same order as the fields
+ * in the MC_CMD_GET_RX_PREFIX_ID_IN bitmask
+ */
+#define       RX_PREFIX_FIELD_INFO_TYPE_OFST 3
+#define       RX_PREFIX_FIELD_INFO_TYPE_LEN 1
+#define          RX_PREFIX_FIELD_INFO_LENGTH 0x0 /* enum */
+#define          RX_PREFIX_FIELD_INFO_RSS_HASH_VALID 0x1 /* enum */
+#define          RX_PREFIX_FIELD_INFO_USER_FLAG 0x2 /* enum */
+#define          RX_PREFIX_FIELD_INFO_CLASS 0x3 /* enum */
+#define          RX_PREFIX_FIELD_INFO_PARTIAL_TSTAMP 0x4 /* enum */
+#define          RX_PREFIX_FIELD_INFO_RSS_HASH 0x5 /* enum */
+#define          RX_PREFIX_FIELD_INFO_USER_MARK 0x6 /* enum */
+#define          RX_PREFIX_FIELD_INFO_INGRESS_VPORT 0x7 /* enum */
+#define          RX_PREFIX_FIELD_INFO_CSUM_FRAME 0x8 /* enum */
+#define          RX_PREFIX_FIELD_INFO_VLAN_STRIP_TCI 0x9 /* enum */
+#define       RX_PREFIX_FIELD_INFO_TYPE_LBN 24
+#define       RX_PREFIX_FIELD_INFO_TYPE_WIDTH 8
+
+/* RX_PREFIX_FIXED_RESPONSE structuredef: Information about an RX prefix in
+ * which every field has a fixed offset and width
+ */
+#define    RX_PREFIX_FIXED_RESPONSE_LENMIN 4
+#define    RX_PREFIX_FIXED_RESPONSE_LENMAX 252
+#define    RX_PREFIX_FIXED_RESPONSE_LENMAX_MCDI2 1020
+#define    RX_PREFIX_FIXED_RESPONSE_LEN(num) (4+4*(num))
+#define    RX_PREFIX_FIXED_RESPONSE_FIELDS_NUM(len) (((len)-4)/4)
+/* Length of the RX prefix in bytes */
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST 0
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LEN 1
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_LBN 0
+#define       RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_WIDTH 8
+/* Number of fields present in the prefix */
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_OFST 1
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LEN 1
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_LBN 8
+#define       RX_PREFIX_FIXED_RESPONSE_FIELD_COUNT_WIDTH 8
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_OFST 2
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_LEN 2
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_LBN 16
+#define       RX_PREFIX_FIXED_RESPONSE_RESERVED_WIDTH 16
+/* Array of RX_PREFIX_FIELD_INFO structures, of length FIELD_COUNT */
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_OFST 4
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_LEN 4
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MINNUM 0
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM 62
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_MAXNUM_MCDI2 254
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_LBN 32
+#define       RX_PREFIX_FIXED_RESPONSE_FIELDS_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_QUERY_RX_PREFIX_ID
+ * This command takes an RX prefix id (obtained from MC_CMD_GET_RX_PREFIX_ID)
+ * and returns a description of the RX prefix of packets delivered to an RXQ
+ * created with that prefix id
+ */
+#define MC_CMD_QUERY_RX_PREFIX_ID 0x13c
+#undef MC_CMD_0x13c_PRIVILEGE_CTG
+
+#define MC_CMD_0x13c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_IN msgrequest */
+#define    MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN 4
+/* Prefix id to query */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_OFST 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID_LEN 4
+
+/* MC_CMD_QUERY_RX_PREFIX_ID_OUT msgresponse */
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMIN 4
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX 252
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_LEN(num) (4+1*(num))
+#define    MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_NUM(len) (((len)-4)/1)
+/* An enum describing the structure of this response. */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_OFST 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_LEN 1
+/* enum: The response is of format RX_PREFIX_FIXED_RESPONSE */
+#define          MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED 0x0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_OFST 1
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESERVED_LEN 3
+/* The response. Its format is as defined by the RESPONSE_TYPE value */
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_OFST 4
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_LEN 1
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MINNUM 0
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM 248
+#define       MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_MAXNUM_MCDI2 1016
+
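+/* Usage sketch for MC_CMD_QUERY_RX_PREFIX_ID: query a previously obtained
+ * prefix-id and, if the response is of the FIXED format, read the prefix
+ * length from the embedded RX_PREFIX_FIXED_RESPONSE structure. A sketch
+ * assuming the sfc MCDI helpers (MCDI_DECLARE_BUF, MCDI_SET_DWORD, MCDI_PTR,
+ * efx_mcdi_rpc) and efx, rc, prefix_id in scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_QUERY_RX_PREFIX_ID_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_QUERY_RX_PREFIX_ID_OUT_LENMAX);
+ *    size_t outlen;
+ *
+ *    MCDI_SET_DWORD(inbuf, QUERY_RX_PREFIX_ID_IN_RX_PREFIX_ID, prefix_id);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_QUERY_RX_PREFIX_ID, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    if (!rc && *MCDI_PTR(outbuf, QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE) ==
+ *               MC_CMD_QUERY_RX_PREFIX_ID_OUT_RESPONSE_TYPE_FIXED) {
+ *        const u8 *resp = MCDI_PTR(outbuf, QUERY_RX_PREFIX_ID_OUT_RESPONSE);
+ *        u8 prefix_len = resp[RX_PREFIX_FIXED_RESPONSE_PREFIX_LENGTH_BYTES_OFST];
+ *    }
+ */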
+
+/***********************************/
+/* MC_CMD_BUNDLE
+ * A command to perform various bundle-related operations on insecure cards.
+ */
+#define MC_CMD_BUNDLE 0x13d
+#undef MC_CMD_0x13d_PRIVILEGE_CTG
+
+#define MC_CMD_0x13d_PRIVILEGE_CTG SRIOV_CTG_INSECURE
+
+/* MC_CMD_BUNDLE_IN msgrequest */
+#define    MC_CMD_BUNDLE_IN_LEN 4
+/* Sub-command code */
+#define       MC_CMD_BUNDLE_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_IN_OP_LEN 4
+/* enum: Get the current host access mode set on component partitions. */
+#define          MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_GET 0x0
+/* enum: Set the host access mode set on component partitions. */
+#define          MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN msgrequest: Retrieve the current
+ * access mode on component partitions such as MC_FIRMWARE, SUC_FIRMWARE and
+ * EXPANSION_UEFI. This command only works on engineering (insecure) cards. On
+ * secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_LEN 4
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_GET. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_IN_OP_LEN 4
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT msgresponse: Returns the access
+ * control mode.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_LEN 4
+/* Access mode of component partitions. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT_ACCESS_MODE_LEN 4
+/* enum: Component partitions are read-only from the host. */
+#define          MC_CMD_BUNDLE_COMPONENTS_READ_ONLY 0x0
+/* enum: Component partitions can be read from and written to by the host. */
+#define          MC_CMD_BUNDLE_COMPONENTS_READ_WRITE 0x1
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN msgrequest: The component
+ * partitions such as MC_FIRMWARE, SUC_FIRMWARE, EXPANSION_UEFI are set as
+ * read-only on firmware built with bundle support. This command marks these
+ * partitions as read/writeable. The access status set by this command does not
+ * persist across MC reboots. This command only works on engineering (insecure)
+ * cards. On secure adapters, this command returns MC_CMD_ERR_EPERM.
+ */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN 8
+/* Sub-command code. Must be OP_COMPONENT_ACCESS_SET. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_OFST 0
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP_LEN 4
+/* Access mode of component partitions. */
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_OFST 4
+#define       MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_GET_OUT/ACCESS_MODE */
+
+/* MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT msgresponse */
+#define    MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_OUT_LEN 0
+
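+/* Usage sketch for MC_CMD_BUNDLE: mark component partitions as read/write on
+ * an engineering (insecure) card; on secure adapters this fails with
+ * MC_CMD_ERR_EPERM. A sketch assuming the sfc MCDI helpers and efx, rc in
+ * scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_BUNDLE_OP_COMPONENT_ACCESS_SET_IN_LEN);
+ *
+ *    MCDI_SET_DWORD(inbuf, BUNDLE_OP_COMPONENT_ACCESS_SET_IN_OP,
+ *                   MC_CMD_BUNDLE_IN_OP_COMPONENT_ACCESS_SET);
+ *    MCDI_SET_DWORD(inbuf, BUNDLE_OP_COMPONENT_ACCESS_SET_IN_ACCESS_MODE,
+ *                   MC_CMD_BUNDLE_COMPONENTS_READ_WRITE);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_BUNDLE, inbuf, sizeof(inbuf),
+ *                      NULL, 0, NULL);
+ */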
+
+/***********************************/
+/* MC_CMD_GET_VPD
+ * Read all VPD starting from a given address
+ */
+#define MC_CMD_GET_VPD 0x165
+#undef MC_CMD_0x165_PRIVILEGE_CTG
+
+#define MC_CMD_0x165_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_VPD_IN msgrequest */
+#define    MC_CMD_GET_VPD_IN_LEN 4
+/* VPD address to start from. If the VPD is longer than the MCDI buffer
+ * (unlikely), the caller can make multiple calls with different starting
+ * addresses.
+ */
+#define       MC_CMD_GET_VPD_IN_ADDR_OFST 0
+#define       MC_CMD_GET_VPD_IN_ADDR_LEN 4
+
+/* MC_CMD_GET_VPD_OUT msgresponse */
+#define    MC_CMD_GET_VPD_OUT_LENMIN 0
+#define    MC_CMD_GET_VPD_OUT_LENMAX 252
+#define    MC_CMD_GET_VPD_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_GET_VPD_OUT_LEN(num) (0+1*(num))
+#define    MC_CMD_GET_VPD_OUT_DATA_NUM(len) (((len)-0)/1)
+/* VPD data returned. */
+#define       MC_CMD_GET_VPD_OUT_DATA_OFST 0
+#define       MC_CMD_GET_VPD_OUT_DATA_LEN 1
+#define       MC_CMD_GET_VPD_OUT_DATA_MINNUM 0
+#define       MC_CMD_GET_VPD_OUT_DATA_MAXNUM 252
+#define       MC_CMD_GET_VPD_OUT_DATA_MAXNUM_MCDI2 1020
+
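+/* Usage sketch for MC_CMD_GET_VPD: read the VPD in chunks, advancing ADDR by
+ * the number of bytes returned until a short response indicates the end. A
+ * sketch assuming the sfc MCDI helpers and efx, rc in scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VPD_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VPD_OUT_LENMAX);
+ *    size_t outlen;
+ *    u32 addr = 0;
+ *
+ *    do {
+ *        MCDI_SET_DWORD(inbuf, GET_VPD_IN_ADDR, addr);
+ *        rc = efx_mcdi_rpc(efx, MC_CMD_GET_VPD, inbuf, sizeof(inbuf),
+ *                          outbuf, sizeof(outbuf), &outlen);
+ *        if (rc)
+ *            break;
+ *        ... consume MC_CMD_GET_VPD_OUT_DATA_NUM(outlen) bytes starting at
+ *            MCDI_PTR(outbuf, GET_VPD_OUT_DATA) ...
+ *        addr += MC_CMD_GET_VPD_OUT_DATA_NUM(outlen);
+ *    } while (outlen == MC_CMD_GET_VPD_OUT_LENMAX);
+ */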
+
+/***********************************/
+/* MC_CMD_GET_NCSI_INFO
+ * Provide information about the NC-SI stack
+ */
+#define MC_CMD_GET_NCSI_INFO 0x167
+#undef MC_CMD_0x167_PRIVILEGE_CTG
+
+#define MC_CMD_0x167_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_NCSI_INFO_IN msgrequest */
+#define    MC_CMD_GET_NCSI_INFO_IN_LEN 8
+/* Operation to be performed */
+#define       MC_CMD_GET_NCSI_INFO_IN_OP_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_IN_OP_LEN 4
+/* enum: Information on the link settings. */
+#define          MC_CMD_GET_NCSI_INFO_IN_OP_LINK 0x0
+/* enum: Statistics associated with the channel */
+#define          MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS 0x1
+/* The NC-SI channel on which the operation is to be performed */
+#define       MC_CMD_GET_NCSI_INFO_IN_CHANNEL_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_IN_CHANNEL_LEN 4
+
+/* MC_CMD_GET_NCSI_INFO_LINK_OUT msgresponse */
+#define    MC_CMD_GET_NCSI_INFO_LINK_OUT_LEN 12
+/* Settings as received from BMC. */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_SETTINGS_LEN 4
+/* Advertised capabilities applied to channel. */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_ADV_CAP_LEN 4
+/* General status */
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_OFST 8
+#define       MC_CMD_GET_NCSI_INFO_LINK_OUT_STATUS_LEN 4
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_LBN 0
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_STATE_WIDTH 2
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_LBN 2
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ENABLE_WIDTH 1
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_LBN 3
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_NETWORK_TX_WIDTH 1
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_OFST 8
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_LBN 4
+#define        MC_CMD_GET_NCSI_INFO_LINK_OUT_ATTACHED_WIDTH 1
+
+/* MC_CMD_GET_NCSI_INFO_STATISTICS_OUT msgresponse */
+#define    MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN 28
+/* The number of NC-SI commands received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_OFST 0
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX_LEN 4
+/* The number of NC-SI commands dropped. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_OFST 4
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_PKTS_DROPPED_LEN 4
+/* The number of invalid NC-SI commands received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_OFST 8
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_TYPE_ERRS_LEN 4
+/* The number of checksum errors seen. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_OFST 12
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMD_CSUM_ERRS_LEN 4
+/* The number of NC-SI requests received. */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_OFST 16
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_RX_PKTS_LEN 4
+/* The number of NC-SI responses sent (includes AENs) */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_OFST 20
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_NCSI_TX_PKTS_LEN 4
+/* The number of NC-SI AENs sent */
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_OFST 24
+#define       MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_AENS_SENT_LEN 4
+
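+/* Usage sketch for MC_CMD_GET_NCSI_INFO: fetch the per-channel statistics. A
+ * sketch assuming the sfc MCDI helpers and efx, rc and an NC-SI channel number
+ * in scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_NCSI_INFO_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_NCSI_INFO_STATISTICS_OUT_LEN);
+ *    u32 cmds_rx;
+ *
+ *    MCDI_SET_DWORD(inbuf, GET_NCSI_INFO_IN_OP,
+ *                   MC_CMD_GET_NCSI_INFO_IN_OP_STATISTICS);
+ *    MCDI_SET_DWORD(inbuf, GET_NCSI_INFO_IN_CHANNEL, channel);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_GET_NCSI_INFO, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (!rc)
+ *        cmds_rx = MCDI_DWORD(outbuf,
+ *                             GET_NCSI_INFO_STATISTICS_OUT_NCSI_CMDS_RX);
+ */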
+
+/* CLOCK_INFO structuredef: Information about a single hardware clock */
+#define    CLOCK_INFO_LEN 28
+/* Enumeration that uniquely identifies the clock */
+#define       CLOCK_INFO_CLOCK_ID_OFST 0
+#define       CLOCK_INFO_CLOCK_ID_LEN 2
+/* enum: The Riverhead CMC (card MC) */
+#define          CLOCK_INFO_CLOCK_CMC 0x0
+/* enum: The Riverhead NMC (network MC) */
+#define          CLOCK_INFO_CLOCK_NMC 0x1
+/* enum: The Riverhead SDNET slice main logic */
+#define          CLOCK_INFO_CLOCK_SDNET 0x2
+/* enum: The Riverhead SDNET LUT */
+#define          CLOCK_INFO_CLOCK_SDNET_LUT 0x3
+/* enum: The Riverhead SDNET control logic */
+#define          CLOCK_INFO_CLOCK_SDNET_CTRL 0x4
+/* enum: The Riverhead Streaming SubSystem */
+#define          CLOCK_INFO_CLOCK_SSS 0x5
+/* enum: The Riverhead network MAC and associated CSR registers */
+#define          CLOCK_INFO_CLOCK_MAC 0x6
+#define       CLOCK_INFO_CLOCK_ID_LBN 0
+#define       CLOCK_INFO_CLOCK_ID_WIDTH 16
+/* Assorted flags */
+#define       CLOCK_INFO_FLAGS_OFST 2
+#define       CLOCK_INFO_FLAGS_LEN 2
+#define        CLOCK_INFO_SETTABLE_OFST 2
+#define        CLOCK_INFO_SETTABLE_LBN 0
+#define        CLOCK_INFO_SETTABLE_WIDTH 1
+#define       CLOCK_INFO_FLAGS_LBN 16
+#define       CLOCK_INFO_FLAGS_WIDTH 16
+/* The frequency in Hz */
+#define       CLOCK_INFO_FREQUENCY_OFST 4
+#define       CLOCK_INFO_FREQUENCY_LEN 8
+#define       CLOCK_INFO_FREQUENCY_LO_OFST 4
+#define       CLOCK_INFO_FREQUENCY_HI_OFST 8
+#define       CLOCK_INFO_FREQUENCY_LBN 32
+#define       CLOCK_INFO_FREQUENCY_WIDTH 64
+/* Human-readable ASCII name for clock, with NUL termination */
+#define       CLOCK_INFO_NAME_OFST 12
+#define       CLOCK_INFO_NAME_LEN 1
+#define       CLOCK_INFO_NAME_NUM 16
+#define       CLOCK_INFO_NAME_LBN 96
+#define       CLOCK_INFO_NAME_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCKS_INFO
+ * Get information about the device clocks
+ */
+#define MC_CMD_GET_CLOCKS_INFO 0x166
+#undef MC_CMD_0x166_PRIVILEGE_CTG
+
+#define MC_CMD_0x166_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_CLOCKS_INFO_IN msgrequest */
+#define    MC_CMD_GET_CLOCKS_INFO_IN_LEN 0
+
+/* MC_CMD_GET_CLOCKS_INFO_OUT msgresponse */
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMIN 0
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX 252
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX_MCDI2 1008
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_LEN(num) (0+28*(num))
+#define    MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(len) (((len)-0)/28)
+/* An array of CLOCK_INFO structures. */
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_OFST 0
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_LEN 28
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MINNUM 0
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM 9
+#define       MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_MAXNUM_MCDI2 36
+
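+/* Usage sketch for MC_CMD_GET_CLOCKS_INFO: walk the returned CLOCK_INFO array.
+ * Each entry is CLOCK_INFO_LEN (28) bytes and the CLOCK_INFO_*_OFST macros
+ * above index into an entry. A sketch assuming the sfc MCDI helpers and efx,
+ * rc in scope:
+ *
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCKS_INFO_OUT_LENMAX);
+ *    size_t outlen;
+ *    unsigned int i;
+ *
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCKS_INFO, NULL, 0,
+ *                      outbuf, sizeof(outbuf), &outlen);
+ *    for (i = 0; !rc && i < MC_CMD_GET_CLOCKS_INFO_OUT_INFOS_NUM(outlen); i++) {
+ *        const u8 *info = MCDI_PTR(outbuf, GET_CLOCKS_INFO_OUT_INFOS) +
+ *                         i * CLOCK_INFO_LEN;
+ *        const char *name = (const char *)info + CLOCK_INFO_NAME_OFST;
+ *        ...
+ *    }
+ */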
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_ADD
+ * Add a rule for detecting encapsulations in the VNIC stage. Currently this
+ * only affects checksum validation in VNIC RX - on TX the send descriptor
+ * explicitly specifies encapsulation. These rules are per-VNIC, i.e. only
+ * apply to the current driver. If a rule matches, then the packet is
+ * considered to have the corresponding encapsulation type, and the inner
+ * packet is parsed. It is up to the driver to ensure that overlapping rules
+ * are not inserted. (If a packet would match multiple rules, a random one of
+ * them will be used.) A rule with the exact same match criteria may not be
+ * inserted twice (EALREADY). Only a limited number of MATCH_FLAGS values are
+ * supported; use MC_CMD_GET_PARSER_DISP_INFO with OP
+ * OP_GET_SUPPORTED_VNIC_ENCAP_RULE_MATCHES to get a list of supported
+ * combinations. Each driver may only have a limited set of active rules -
+ * returns ENOSPC if the caller's table is full.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_ADD 0x16d
+#undef MC_CMD_0x16d_PRIVILEGE_CTG
+
+#define MC_CMD_0x16d_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_IN msgrequest */
+#define    MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN 36
+/* Set to MAE_MPORT_SELECTOR_ASSIGNED. In the future this may be relaxed. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR_LEN 4
+/* Any non-zero bits other than the ones named below or an unsupported
+ * combination will cause the NIC to return EOPNOTSUPP. In the future more
+ * flags may be added.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_OFST 4
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS_LEN 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_LBN 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_LBN 2
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_IP_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_LBN 3
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO_WIDTH 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_OFST 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_LBN 4
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT_WIDTH 1
+/* Only if MATCH_ETHER_TYPE is set. Ethertype value as bytes in network order.
+ * Currently only IPv4 (0x0800) and IPv6 (0x86DD) ethertypes may be used.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_OFST 8
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ETHER_TYPE_LEN 2
+/* Only if MATCH_OUTER_VLAN is set. VID value as bytes in network order.
+ * (Deprecated)
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_LBN 80
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WIDTH 12
+/* Only if MATCH_OUTER_VLAN is set. Aligned wrapper for OUTER_VLAN_VID. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_OFST 10
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_WORD_LEN 2
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_OFST 10
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_OUTER_VLAN_VID_WIDTH 12
+/* Only if MATCH_DST_IP is set. IP address as bytes in network order. In the
+ * case of IPv4, the IP should be in the first 4 bytes and all other bytes
+ * should be zero.
+ */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_OFST 12
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_IP_LEN 16
+/* Only if MATCH_IP_PROTO is set. Currently only UDP proto (17) may be used. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_OFST 28
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_IP_PROTO_LEN 1
+/* Actions that should be applied to packets that match the rule. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_OFST 29
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ACTION_FLAGS_LEN 1
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_OFST 29
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_LBN 0
+#define        MC_CMD_VNIC_ENCAP_RULE_ADD_IN_STRIP_OUTER_VLAN_WIDTH 1
+/* Only if MATCH_DST_PORT is set. Port number as bytes in network order. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_OFST 30
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_DST_PORT_LEN 2
+/* Resulting encapsulation type, as per MAE_MCDI_ENCAP_TYPE enumeration. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_OFST 32
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_ADD_OUT msgresponse */
+#define    MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN 4
+/* Handle to inserted rule. Used for removing the rule. */
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_HANDLE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE
+ * Remove a VNIC encapsulation rule. Packets which would have previously
+ * matched the rule will then be considered as unencapsulated. Returns
+ * EALREADY if the input HANDLE doesn't correspond to an existing rule.
+ */
+#define MC_CMD_VNIC_ENCAP_RULE_REMOVE 0x16e
+#undef MC_CMD_0x16e_PRIVILEGE_CTG
+
+#define MC_CMD_0x16e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN msgrequest */
+#define    MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_LEN 4
+/* Handle which was returned by MC_CMD_VNIC_ENCAP_RULE_ADD. */
+#define       MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_OFST 0
+#define       MC_CMD_VNIC_ENCAP_RULE_REMOVE_IN_HANDLE_LEN 4
+
+/* MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT msgresponse */
+#define    MC_CMD_VNIC_ENCAP_RULE_REMOVE_OUT_LEN 0
+
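+/* Usage sketch for MC_CMD_VNIC_ENCAP_RULE_ADD: classify UDP packets to a given
+ * destination port (e.g. 4789 for VXLAN) as encapsulated. A sketch assuming
+ * the sfc MCDI helpers (including MCDI_POPULATE_DWORD_2 and MCDI_PTR) and efx,
+ * rc in scope; encap_type stands for a value from the MAE_MCDI_ENCAP_TYPE
+ * enumeration:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_VNIC_ENCAP_RULE_ADD_OUT_LEN);
+ *    u32 handle;
+ *
+ *    MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_MPORT_SELECTOR,
+ *                   MAE_MPORT_SELECTOR_ASSIGNED);
+ *    MCDI_POPULATE_DWORD_2(inbuf, VNIC_ENCAP_RULE_ADD_IN_MATCH_FLAGS,
+ *                          VNIC_ENCAP_RULE_ADD_IN_MATCH_IP_PROTO, 1,
+ *                          VNIC_ENCAP_RULE_ADD_IN_MATCH_DST_PORT, 1);
+ *    *MCDI_PTR(inbuf, VNIC_ENCAP_RULE_ADD_IN_IP_PROTO) = IPPROTO_UDP;
+ *    *(__be16 *)MCDI_PTR(inbuf, VNIC_ENCAP_RULE_ADD_IN_DST_PORT) = htons(4789);
+ *    MCDI_SET_DWORD(inbuf, VNIC_ENCAP_RULE_ADD_IN_ENCAP_TYPE, encap_type);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_VNIC_ENCAP_RULE_ADD, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (!rc)
+ *        handle = MCDI_DWORD(outbuf, VNIC_ENCAP_RULE_ADD_OUT_HANDLE);
+ *
+ * The returned handle is later passed to MC_CMD_VNIC_ENCAP_RULE_REMOVE.
+ */
+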
+/* FUNCTION_PERSONALITY structuredef: The meanings of the personalities are
+ * defined in SF-120734-TC with more information in SF-122717-TC.
+ */
+#define    FUNCTION_PERSONALITY_LEN 4
+#define       FUNCTION_PERSONALITY_ID_OFST 0
+#define       FUNCTION_PERSONALITY_ID_LEN 4
+/* enum: Function has no assigned personality */
+#define          FUNCTION_PERSONALITY_NULL 0x0
+/* enum: Function has an EF100-style function control window and VI windows
+ * with both EF100 and vDPA doorbells.
+ */
+#define          FUNCTION_PERSONALITY_EF100 0x1
+/* enum: Function has virtio net device configuration registers and doorbells
+ * for virtio queue pairs.
+ */
+#define          FUNCTION_PERSONALITY_VIRTIO_NET 0x2
+/* enum: Function has virtio block device configuration registers and a
+ * doorbell for a single virtqueue.
+ */
+#define          FUNCTION_PERSONALITY_VIRTIO_BLK 0x3
+/* enum: Function is a Xilinx acceleration device - management function */
+#define          FUNCTION_PERSONALITY_ACCEL_MGMT 0x4
+/* enum: Function is a Xilinx acceleration device - user function */
+#define          FUNCTION_PERSONALITY_ACCEL_USR 0x5
+#define       FUNCTION_PERSONALITY_ID_LBN 0
+#define       FUNCTION_PERSONALITY_ID_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_GET_FEATURES
+ * Get a list of the virtio features supported by the device.
+ */
+#define MC_CMD_VIRTIO_GET_FEATURES 0x168
+#undef MC_CMD_0x168_PRIVILEGE_CTG
+
+#define MC_CMD_0x168_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_FEATURES_IN msgrequest */
+#define    MC_CMD_VIRTIO_GET_FEATURES_IN_LEN 4
+/* Type of device to get features for. Matches the device id as defined by the
+ * virtio spec.
+ */
+#define       MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_IN_DEVICE_ID_LEN 4
+/* enum: Reserved. Do not use. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_RESERVED 0x0
+/* enum: Net device. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_NET 0x1
+/* enum: Block device. */
+#define          MC_CMD_VIRTIO_GET_FEATURES_IN_BLOCK 0x2
+
+/* MC_CMD_VIRTIO_GET_FEATURES_OUT msgresponse */
+#define    MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN 8
+/* Features supported by the device. The result is a bitfield in the format of
+ * the feature bits of the specified device type as defined in the virtIO 1.1
+ * specification ( https://docs.oasis-
+ * open.org/virtio/virtio/v1.1/csprd01/virtio-v1.1-csprd01.pdf )
+ */
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_LO_OFST 0
+#define       MC_CMD_VIRTIO_GET_FEATURES_OUT_FEATURES_HI_OFST 4
+
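+/* Usage sketch for MC_CMD_VIRTIO_GET_FEATURES: read the feature bitmask for
+ * the net device type. A sketch assuming the sfc MCDI helpers and efx, rc in
+ * scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_GET_FEATURES_IN_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_VIRTIO_GET_FEATURES_OUT_LEN);
+ *    u64 features;
+ *
+ *    MCDI_SET_DWORD(inbuf, VIRTIO_GET_FEATURES_IN_DEVICE_ID,
+ *                   MC_CMD_VIRTIO_GET_FEATURES_IN_NET);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_GET_FEATURES, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (!rc)
+ *        features = MCDI_QWORD(outbuf, VIRTIO_GET_FEATURES_OUT_FEATURES);
+ *
+ * A subset of this bitmask can then be checked with MC_CMD_VIRTIO_TEST_FEATURES
+ * and passed to MC_CMD_VIRTIO_INIT_QUEUE.
+ */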
+
+/***********************************/
+/* MC_CMD_VIRTIO_TEST_FEATURES
+ * Query whether a given set of features is supported. Fails with ENOTSUP if the
+ * driver requests a feature the device doesn't support. Fails with EINVAL if
+ * the driver fails to request a feature which the device requires.
+ */
+#define MC_CMD_VIRTIO_TEST_FEATURES 0x169
+#undef MC_CMD_0x169_PRIVILEGE_CTG
+
+#define MC_CMD_0x169_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_TEST_FEATURES_IN msgrequest */
+#define    MC_CMD_VIRTIO_TEST_FEATURES_IN_LEN 16
+/* Type of device to test features for. Matches the device id as defined by the
+ * virtio spec.
+ */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_DEVICE_ID_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_OFST 4
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_RESERVED_LEN 4
+/* Features requested. Same format as the returned value from
+ * MC_CMD_VIRTIO_GET_FEATURES.
+ */
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_OFST 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_LO_OFST 8
+#define       MC_CMD_VIRTIO_TEST_FEATURES_IN_FEATURES_HI_OFST 12
+
+/* MC_CMD_VIRTIO_TEST_FEATURES_OUT msgresponse */
+#define    MC_CMD_VIRTIO_TEST_FEATURES_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VIRTIO_INIT_QUEUE
+ * Create a virtio virtqueue. Fails with EALREADY if the queue already exists.
+ * Fails with ENOTSUP if a feature is requested that isn't supported. Fails with
+ * EINVAL if a required feature isn't requested, or any other parameter is
+ * invalid.
+ */
+#define MC_CMD_VIRTIO_INIT_QUEUE 0x16a
+#undef MC_CMD_0x16a_PRIVILEGE_CTG
+
+#define MC_CMD_0x16a_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_INIT_QUEUE_REQ msgrequest */
+#define    MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN 68
+/* Type of virtqueue to create. A network rxq and a txq can exist at the same
+ * time on a single VI.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_OFST 0
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE_LEN 1
+/* enum: A network device receive queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ 0x0
+/* enum: A network device transmit queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_TXQ 0x1
+/* enum: A block device request queue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_BLOCK 0x2
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, create the
+ * queue on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_TARGET_VF_LEN 2
+/* enum: No VF, create queue on the PF. */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL 0xffff
+/* Desired instance. This is the function-local index of the associated VI, not
+ * the virtqueue number as counted by the virtqueue spec.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INSTANCE_LEN 4
+/* Queue size, in entries. Must be a power of two. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_OFST 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_SIZE_LEN 4
+/* Flags */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_OFST 12
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FLAGS_LEN 4
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_OFST 12
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_LBN 0
+#define        MC_CMD_VIRTIO_INIT_QUEUE_REQ_USE_PASID_WIDTH 1
+/* Address of the descriptor table in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_OFST 16
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_LO_OFST 16
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR_HI_OFST 20
+/* Address of the available ring in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_OFST 24
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_LO_OFST 24
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR_HI_OFST 28
+/* Address of the used ring in the virtqueue. */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_OFST 32
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_LO_OFST 32
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR_HI_OFST 36
+/* PASID to use on PCIe transactions involving this queue. Ignored if the
+ * USE_PASID flag is not set.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_OFST 40
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_PASID_LEN 4
+/* Which MSIX vector to use for this virtqueue, or NO_VECTOR if MSIX should not
+ * be used.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_OFST 44
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR_LEN 2
+/* enum: Do not enable interrupts for this virtqueue */
+#define          MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR 0xffff
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_OFST 46
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_RESERVED2_LEN 2
+/* Virtio features to apply to this queue. Same format as in the virtio
+ * spec and in the return from MC_CMD_VIRTIO_GET_FEATURES. Must be a subset of
+ * the features returned from MC_CMD_VIRTIO_GET_FEATURES. Features are per-
+ * queue because with vDPA multiple queues on the same function can be passed
+ * through to different virtual hosts as independent devices.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_OFST 48
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LEN 8
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_LO_OFST 48
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_FEATURES_HI_OFST 52
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_OUT/FEATURES */
+/* The initial producer index for this queue's used ring. If this queue is being
+ * created to be migrated into, this should be the FINAL_PIDX value returned by
+ * MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from. Otherwise, it
+ * should be zero.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_OFST 56
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_PIDX_LEN 4
+/* The initial consumer index for this queue's available ring. If this queue is
+ * being created to be migrated into, this should be the FINAL_CIDX value
+ * returned by MC_CMD_VIRTIO_FINI_QUEUE of the queue being migrated from.
+ * Otherwise, it should be zero.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_OFST 60
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_INITIAL_CIDX_LEN 4
+/* A MAE_MPORT_SELECTOR defining which mport this queue should be associated
+ * with. Use MAE_MPORT_SELECTOR_ASSIGNED to request the default mport for the
+ * function this queue is being created on.
+ */
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_OFST 64
+#define       MC_CMD_VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR_LEN 4
+
+/* MC_CMD_VIRTIO_INIT_QUEUE_RESP msgresponse */
+#define    MC_CMD_VIRTIO_INIT_QUEUE_RESP_LEN 0
+
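+/* Usage sketch for MC_CMD_VIRTIO_INIT_QUEUE: create a net RXQ on the PF with
+ * interrupts disabled. MCDI_DECLARE_BUF zero-initialises the buffer, so
+ * USE_PASID, INITIAL_PIDX and INITIAL_CIDX are left at zero. A sketch assuming
+ * the sfc MCDI helpers and efx, rc in scope; vi_index, queue_size, desc_addr,
+ * avail_addr, used_addr and features are caller-supplied values:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_INIT_QUEUE_REQ_LEN);
+ *
+ *    *MCDI_PTR(inbuf, VIRTIO_INIT_QUEUE_REQ_QUEUE_TYPE) =
+ *        MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ;
+ *    *(__le16 *)MCDI_PTR(inbuf, VIRTIO_INIT_QUEUE_REQ_TARGET_VF) =
+ *        cpu_to_le16(MC_CMD_VIRTIO_INIT_QUEUE_REQ_VF_NULL);
+ *    MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_INSTANCE, vi_index);
+ *    MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_SIZE, queue_size);
+ *    MCDI_SET_QWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_DESC_TBL_ADDR, desc_addr);
+ *    MCDI_SET_QWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_AVAIL_RING_ADDR, avail_addr);
+ *    MCDI_SET_QWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_USED_RING_ADDR, used_addr);
+ *    *(__le16 *)MCDI_PTR(inbuf, VIRTIO_INIT_QUEUE_REQ_MSIX_VECTOR) =
+ *        cpu_to_le16(MC_CMD_VIRTIO_INIT_QUEUE_REQ_NO_VECTOR);
+ *    MCDI_SET_QWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_FEATURES, features);
+ *    MCDI_SET_DWORD(inbuf, VIRTIO_INIT_QUEUE_REQ_MPORT_SELECTOR,
+ *                   MAE_MPORT_SELECTOR_ASSIGNED);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_INIT_QUEUE, inbuf, sizeof(inbuf),
+ *                      NULL, 0, NULL);
+ */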
+
+/***********************************/
+/* MC_CMD_VIRTIO_FINI_QUEUE
+ * Destroy a virtio virtqueue
+ */
+#define MC_CMD_VIRTIO_FINI_QUEUE 0x16b
+#undef MC_CMD_0x16b_PRIVILEGE_CTG
+
+#define MC_CMD_0x16b_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_FINI_QUEUE_REQ msgrequest */
+#define    MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN 8
+/* Type of virtqueue to destroy. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_OFST 0
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE_LEN 1
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_INIT_QUEUE/MC_CMD_VIRTIO_INIT_QUEUE_REQ/QUEUE_TYPE */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, destroy the
+ * queue on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_TARGET_VF_LEN 2
+/* enum: No VF, destroy the queue on the PF. */
+#define          MC_CMD_VIRTIO_FINI_QUEUE_REQ_VF_NULL 0xffff
+/* Instance to destroy */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_FINI_QUEUE_REQ_INSTANCE_LEN 4
+
+/* MC_CMD_VIRTIO_FINI_QUEUE_RESP msgresponse */
+#define    MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN 8
+/* The producer index of the used ring when the queue was stopped. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_OFST 0
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX_LEN 4
+/* The consumer index of the available ring when the queue was stopped. */
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_OFST 4
+#define       MC_CMD_VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX_LEN 4
+
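+/* Usage sketch for MC_CMD_VIRTIO_FINI_QUEUE: tear down a net RXQ on the PF and
+ * capture the final ring indices. When migrating, FINAL_PIDX/FINAL_CIDX are
+ * the values to feed back as INITIAL_PIDX/INITIAL_CIDX in the destination's
+ * MC_CMD_VIRTIO_INIT_QUEUE. A sketch assuming the sfc MCDI helpers and efx,
+ * rc, vi_index in scope:
+ *
+ *    MCDI_DECLARE_BUF(inbuf, MC_CMD_VIRTIO_FINI_QUEUE_REQ_LEN);
+ *    MCDI_DECLARE_BUF(outbuf, MC_CMD_VIRTIO_FINI_QUEUE_RESP_LEN);
+ *    u32 final_pidx, final_cidx;
+ *
+ *    *MCDI_PTR(inbuf, VIRTIO_FINI_QUEUE_REQ_QUEUE_TYPE) =
+ *        MC_CMD_VIRTIO_INIT_QUEUE_REQ_NET_RXQ;
+ *    *(__le16 *)MCDI_PTR(inbuf, VIRTIO_FINI_QUEUE_REQ_TARGET_VF) =
+ *        cpu_to_le16(MC_CMD_VIRTIO_FINI_QUEUE_REQ_VF_NULL);
+ *    MCDI_SET_DWORD(inbuf, VIRTIO_FINI_QUEUE_REQ_INSTANCE, vi_index);
+ *    rc = efx_mcdi_rpc(efx, MC_CMD_VIRTIO_FINI_QUEUE, inbuf, sizeof(inbuf),
+ *                      outbuf, sizeof(outbuf), NULL);
+ *    if (!rc) {
+ *        final_pidx = MCDI_DWORD(outbuf, VIRTIO_FINI_QUEUE_RESP_FINAL_PIDX);
+ *        final_cidx = MCDI_DWORD(outbuf, VIRTIO_FINI_QUEUE_RESP_FINAL_CIDX);
+ *    }
+ */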
+
+/***********************************/
+/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET
+ * Get the offset in the BAR of the doorbells for a VI. Doesn't require the
+ * queue(s) to be allocated.
+ */
+#define MC_CMD_VIRTIO_GET_DOORBELL_OFFSET 0x16c
+#undef MC_CMD_0x16c_PRIVILEGE_CTG
+
+#define MC_CMD_0x16c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ msgrequest */
+#define    MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_LEN 8
+/* Type of device to get information for. Matches the device id as defined by
+ * the virtio spec.
+ */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_OFST 0
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_DEVICE_ID_LEN 1
+/*            Enum values, see field(s): */
+/*               MC_CMD_VIRTIO_GET_FEATURES/MC_CMD_VIRTIO_GET_FEATURES_IN/DEVICE_ID */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_OFST 1
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_RESERVED_LEN 1
+/* If the calling function is a PF and this field is not VF_NULL, query the VI
+ * on the specified child VF instead of on the PF.
+ */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_OFST 2
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_TARGET_VF_LEN 2
+/* enum: No VF, query the PF. */
+#define          MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_VF_NULL 0xffff
+/* VI instance to query */
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_OFST 4
+#define       MC_CMD_VIRTIO_GET_DOORBELL_OFFSET_REQ_INSTANCE_LEN 4
+
+/* MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP msgresponse */
+#define    MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_LEN 8
+/* Offset of RX doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_OFST 0
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_RX_DBL_OFFSET_LEN 4
+/* Offset of TX doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_OFST 4
+#define       MC_CMD_VIRTIO_GET_NET_DOORBELL_OFFSET_RESP_TX_DBL_OFFSET_LEN 4
+
+/* MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP msgresponse */
+#define    MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_LEN 4
+/* Offset of request doorbell in BAR */
+#define       MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_OFST 0
+#define       MC_CMD_VIRTIO_GET_BLOCK_DOORBELL_OFFSET_RESP_DBL_OFFSET_LEN 4
+
+/* PCIE_FUNCTION structuredef: Structure representing a PCIe function ID
+ * (interface/PF/VF tuple)
+ */
+#define    PCIE_FUNCTION_LEN 8
+/* PCIe PF function number */
+#define       PCIE_FUNCTION_PF_OFST 0
+#define       PCIE_FUNCTION_PF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define          PCIE_FUNCTION_PF_ANY 0xfffe
+/* enum: Value representing invalid (null) function */
+#define          PCIE_FUNCTION_PF_NULL 0xffff
+#define       PCIE_FUNCTION_PF_LBN 0
+#define       PCIE_FUNCTION_PF_WIDTH 16
+/* PCIe VF Function number (PF relative) */
+#define       PCIE_FUNCTION_VF_OFST 2
+#define       PCIE_FUNCTION_VF_LEN 2
+/* enum: Wildcard value representing any available function (e.g. in resource
+ * allocation requests)
+ */
+#define          PCIE_FUNCTION_VF_ANY 0xfffe
+/* enum: Function is a PF (when PF != PF_NULL) or invalid function (when PF ==
+ * PF_NULL)
+ */
+#define          PCIE_FUNCTION_VF_NULL 0xffff
+#define       PCIE_FUNCTION_VF_LBN 16
+#define       PCIE_FUNCTION_VF_WIDTH 16
+/* PCIe interface of the function */
+#define       PCIE_FUNCTION_INTF_OFST 4
+#define       PCIE_FUNCTION_INTF_LEN 4
+/* enum: Host PCIe interface */
+#define          PCIE_FUNCTION_INTF_HOST 0x0
+/* enum: Application Processor interface */
+#define          PCIE_FUNCTION_INTF_AP 0x1
+#define       PCIE_FUNCTION_INTF_LBN 32
+#define       PCIE_FUNCTION_INTF_WIDTH 32
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CREATE
+ * Descriptor proxy functions are abstract devices that forward all requests
+ * submitted to the host PCIe function (descriptors submitted to Virtio or
+ * EF100 queues) to be handled on another function (most commonly on the
+ * embedded Application Processor), via EF100 descriptor proxy, memory-to-
+ * memory and descriptor-to-completion mechanisms. Primary user is Virtio-blk
+ * subsystem, see SF-122927-TC. This function allocates a new descriptor proxy
+ * function on the host and assigns a user-defined label. The actual function
+ * configuration is not persisted until the caller configures it with
+ * MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN and commits with
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT_IN.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CREATE 0x172
+#undef MC_CMD_0x172_PRIVILEGE_CTG
+
+#define MC_CMD_0x172_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CREATE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LEN 52
+/* PCIe Function ID to allocate (as struct PCIE_FUNCTION). Set to
+ * {PF_ANY,VF_ANY,interface} for "any available function". Set to
+ * {PF_ANY,VF_NULL,interface} for "any available PF"
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_LO_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_FUNC_HI_OFST 4
+/* The personality to set. The meanings of the personalities are defined in
+ * SF-120734-TC with more information in SF-122717-TC. At present, we only
+ * support proxying for VIRTIO_BLK
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_OFST 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_OFST 12
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_IN_LABEL_LEN 40
+
+/* MC_CMD_DESC_PROXY_FUNC_CREATE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_LEN 12
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_HANDLE_LEN 4
+/* Allocated function ID (as struct PCIE_FUNCTION) */
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_LO_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CREATE_OUT_FUNC_HI_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY
+ * Remove an existing descriptor proxy function. The underlying function
+ * personality and configuration revert to factory defaults. Function
+ * configuration is committed immediately to specified store and any function
+ * ownership is released.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DESTROY 0x173
+#undef MC_CMD_0x173_PRIVILEGE_CTG
+
+#define MC_CMD_0x173_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LEN 44
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_LABEL_LEN 40
+/* Store from which to remove function configuration */
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_OFST 40
+#define       MC_CMD_DESC_PROXY_FUNC_DESTROY_IN_STORE_LEN 4
+/*            Enum values, see field(s): */
+/*               MC_CMD_DESC_PROXY_FUNC_COMMIT/MC_CMD_DESC_PROXY_FUNC_COMMIT_IN/STORE */
+
+/* MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_DESTROY_OUT_LEN 0
+
+/* VIRTIO_BLK_CONFIG structuredef: Virtio block device configuration. See
+ * Virtio specification v1.1, Sections 5.2.3 and 6 for definition of feature
+ * bits. See Virtio specification v1.1, Section 5.2.4 (struct
+ * virtio_blk_config) for definition of remaining configuration fields
+ */
+#define    VIRTIO_BLK_CONFIG_LEN 68
+/* Virtio block device features to advertise, per Virtio 1.1, 5.2.3 and 6 */
+#define       VIRTIO_BLK_CONFIG_FEATURES_OFST 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_LEN 8
+#define       VIRTIO_BLK_CONFIG_FEATURES_LO_OFST 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_HI_OFST 4
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_LBN 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BARRIER_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_LBN 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SIZE_MAX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_LBN 2
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SEG_MAX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_LBN 4
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_GEOMETRY_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_LBN 5
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_RO_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_LBN 6
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_BLK_SIZE_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_LBN 7
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_SCSI_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_LBN 9
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_FLUSH_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_LBN 10
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_TOPOLOGY_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_LBN 11
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_CONFIG_WCE_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_LBN 12
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_MQ_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_LBN 13
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_DISCARD_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_LBN 14
+#define        VIRTIO_BLK_CONFIG_VIRTIO_BLK_F_WRITE_ZEROES_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_LBN 28
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_INDIRECT_DESC_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_LBN 29
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_EVENT_IDX_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_LBN 32
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_VERSION_1_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_LBN 33
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ACCESS_PLATFORM_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_LBN 34
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_RING_PACKED_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_LBN 35
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_IN_ORDER_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_LBN 36
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_ORDER_PLATFORM_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_LBN 37
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_SR_IOV_WIDTH 1
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_OFST 0
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_LBN 38
+#define        VIRTIO_BLK_CONFIG_VIRTIO_F_NOTIFICATION_DATA_WIDTH 1
+#define       VIRTIO_BLK_CONFIG_FEATURES_LBN 0
+#define       VIRTIO_BLK_CONFIG_FEATURES_WIDTH 64
+/* The capacity of the device (expressed in 512-byte sectors) */
+#define       VIRTIO_BLK_CONFIG_CAPACITY_OFST 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LEN 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LO_OFST 8
+#define       VIRTIO_BLK_CONFIG_CAPACITY_HI_OFST 12
+#define       VIRTIO_BLK_CONFIG_CAPACITY_LBN 64
+#define       VIRTIO_BLK_CONFIG_CAPACITY_WIDTH 64
+/* Maximum size of any single segment. Only valid when VIRTIO_BLK_F_SIZE_MAX is
+ * set.
+ */
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_OFST 16
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_LEN 4
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_LBN 128
+#define       VIRTIO_BLK_CONFIG_SIZE_MAX_WIDTH 32
+/* Maximum number of segments in a request. Only valid when
+ * VIRTIO_BLK_F_SEG_MAX is set.
+ */
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_OFST 20
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_LEN 4
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_LBN 160
+#define       VIRTIO_BLK_CONFIG_SEG_MAX_WIDTH 32
+/* Disk-style geometry - cylinders. Only valid when VIRTIO_BLK_F_GEOMETRY is
+ * set.
+ */
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_OFST 24
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_LEN 2
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_LBN 192
+#define       VIRTIO_BLK_CONFIG_CYLINDERS_WIDTH 16
+/* Disk-style geometry - heads. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_HEADS_OFST 26
+#define       VIRTIO_BLK_CONFIG_HEADS_LEN 1
+#define       VIRTIO_BLK_CONFIG_HEADS_LBN 208
+#define       VIRTIO_BLK_CONFIG_HEADS_WIDTH 8
+/* Disk-style geometry - sectors. Only valid when VIRTIO_BLK_F_GEOMETRY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_SECTORS_OFST 27
+#define       VIRTIO_BLK_CONFIG_SECTORS_LEN 1
+#define       VIRTIO_BLK_CONFIG_SECTORS_LBN 216
+#define       VIRTIO_BLK_CONFIG_SECTORS_WIDTH 8
+/* Block size of disk. Only valid when VIRTIO_BLK_F_BLK_SIZE is set. */
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_OFST 28
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_LEN 4
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_LBN 224
+#define       VIRTIO_BLK_CONFIG_BLK_SIZE_WIDTH 32
+/* Block topology - number of logical blocks per physical block (log2). Only
+ * valid when VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_OFST 32
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LEN 1
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_LBN 256
+#define       VIRTIO_BLK_CONFIG_PHYSICAL_BLOCK_EXP_WIDTH 8
+/* Block topology - offset of first aligned logical block. Only valid when
+ * VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_OFST 33
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LEN 1
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_LBN 264
+#define       VIRTIO_BLK_CONFIG_ALIGNMENT_OFFSET_WIDTH 8
+/* Block topology - suggested minimum I/O size in blocks. Only valid when
+ * VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_OFST 34
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LEN 2
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_LBN 272
+#define       VIRTIO_BLK_CONFIG_MIN_IO_SIZE_WIDTH 16
+/* Block topology - optimal (suggested maximum) I/O size in blocks. Only valid
+ * when VIRTIO_BLK_F_TOPOLOGY is set.
+ */
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_OFST 36
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LEN 4
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_LBN 288
+#define       VIRTIO_BLK_CONFIG_OPT_IO_SIZE_WIDTH 32
+/* Unused, set to zero. Note that virtio_blk_config.writeback is volatile and
+ * not carried in config data.
+ */
+#define       VIRTIO_BLK_CONFIG_UNUSED0_OFST 40
+#define       VIRTIO_BLK_CONFIG_UNUSED0_LEN 2
+#define       VIRTIO_BLK_CONFIG_UNUSED0_LBN 320
+#define       VIRTIO_BLK_CONFIG_UNUSED0_WIDTH 16
+/* Number of queues. Only valid if the VIRTIO_BLK_F_MQ feature is negotiated.
+ */
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_OFST 42
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_LEN 2
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_LBN 336
+#define       VIRTIO_BLK_CONFIG_NUM_QUEUES_WIDTH 16
+/* Maximum discard sectors size, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_OFST 44
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_LBN 352
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SECTORS_WIDTH 32
+/* Maximum discard segment number. Only valid if VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_OFST 48
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_LBN 384
+#define       VIRTIO_BLK_CONFIG_MAX_DISCARD_SEG_WIDTH 32
+/* Discard sector alignment, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_DISCARD is set.
+ */
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_OFST 52
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LEN 4
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_LBN 416
+#define       VIRTIO_BLK_CONFIG_DISCARD_SECTOR_ALIGNMENT_WIDTH 32
+/* Maximum write zeroes sectors size, in 512-byte units. Only valid if
+ * VIRTIO_BLK_F_WRITE_ZEROES is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_OFST 56
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_LBN 448
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SECTORS_WIDTH 32
+/* Maximum write zeroes segment number. Only valid if VIRTIO_BLK_F_WRITE_ZEROES
+ * is set.
+ */
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_OFST 60
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LEN 4
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_LBN 480
+#define       VIRTIO_BLK_CONFIG_MAX_WRITE_ZEROES_SEG_WIDTH 32
+/* Write zeroes request can result in deallocating one or more sectors. Only
+ * valid if VIRTIO_BLK_F_WRITE_ZEROES is set.
+ */
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_OFST 64
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LEN 1
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_LBN 512
+#define       VIRTIO_BLK_CONFIG_WRITE_ZEROES_MAY_UNMAP_WIDTH 8
+/* Unused, set to zero. */
+#define       VIRTIO_BLK_CONFIG_UNUSED1_OFST 65
+#define       VIRTIO_BLK_CONFIG_UNUSED1_LEN 3
+#define       VIRTIO_BLK_CONFIG_UNUSED1_LBN 520
+#define       VIRTIO_BLK_CONFIG_UNUSED1_WIDTH 24
+
+
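+/* Descriptor proxy function management. From the command descriptions below,
+ * the typical call sequence appears to be: MC_CMD_DESC_PROXY_FUNC_OPEN to
+ * obtain a handle, MC_CMD_DESC_PROXY_FUNC_CONFIG_SET followed by
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT to apply configuration, and
+ * MC_CMD_DESC_PROXY_FUNC_ENABLE to start proxying into an event queue, with
+ * MC_CMD_DESC_PROXY_FUNC_DISABLE and MC_CMD_DESC_PROXY_FUNC_CLOSE tearing it
+ * down again.
+ */
+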
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET
+ * Set configuration for an existing descriptor proxy function. Configuration
+ * data must match the function personality. The actual function configuration
+ * is not persisted until the caller commits it with
+ * MC_CMD_DESC_PROXY_FUNC_COMMIT.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CONFIG_SET 0x174
+#undef MC_CMD_0x174_PRIVILEGE_CTG
+
+#define MC_CMD_0x174_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMIN 20
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX 252
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LENMAX_MCDI2 1020
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_LEN(num) (20+1*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_NUM(len) (((len)-20)/1)
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_HANDLE_LEN 4
+/* Reserved for future extension, set to zero. */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_RESERVED_LEN 16
+/* Configuration data. Format of configuration data is determined implicitly
+ * from function personality referred to by HANDLE. Currently, only supported
+ * format is VIRTIO_BLK_CONFIG.
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_OFST 20
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_LEN 1
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM 232
+#define       MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_IN_CONFIG_MAXNUM_MCDI2 1000
+
+/* MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CONFIG_SET_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT
+ * Commit function configuration to non-volatile or volatile store. Once
+ * configuration is applied to hardware (which may happen immediately or on
+ * next function/device reset) a DESC_PROXY_FUNC_CONFIG_SET MCDI event will be
+ * delivered to the caller's MCDI event queue.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_COMMIT 0x175
+#undef MC_CMD_0x175_PRIVILEGE_CTG
+
+#define MC_CMD_0x175_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_HANDLE_LEN 4
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_STORE_LEN 4
+/* enum: Store into non-volatile (dynamic) config */
+#define          MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_NON_VOLATILE 0x0
+/* enum: Store into volatile (ephemeral) config */
+#define          MC_CMD_DESC_PROXY_FUNC_COMMIT_IN_VOLATILE 0x1
+
+/* MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_LEN 4
+/* Generation count to be delivered in an event once configuration becomes live
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_COMMIT_OUT_CONFIG_GENERATION_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_OPEN
+ * Retrieve a handle for an existing descriptor proxy function. Returns an
+ * integer handle, valid until the function is deallocated, the MC is rebooted
+ * or the device is power-cycled. Returns ENODEV if no function with the given
+ * label exists.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_OPEN 0x176
+#undef MC_CMD_0x176_PRIVILEGE_CTG
+
+#define MC_CMD_0x176_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_OPEN_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LEN 40
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_IN_LABEL_LEN 40
+
+/* MC_CMD_DESC_PROXY_FUNC_OPEN_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMIN 40
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX 252
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LENMAX_MCDI2 1020
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LEN(num) (40+1*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_NUM(len) (((len)-40)/1)
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_HANDLE_LEN 4
+/* PCIe Function ID (as struct PCIE_FUNCTION) */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LEN 8
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_LO_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_FUNC_HI_OFST 8
+/* Function personality */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_OFST 12
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+/* Function configuration state */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_OFST 16
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_STATUS_LEN 4
+/* enum: Function configuration is visible to the host (live) */
+#define          MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_LIVE 0x0
+/* enum: Function configuration is pending reset */
+#define          MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_PENDING 0x1
+/* Generation count to be delivered in an event once the configuration becomes
+ * live (if status is "pending")
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_OFST 20
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_GENERATION_LEN 4
+/* Reserved for future extension, set to zero. */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_OFST 24
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_RESERVED_LEN 16
+/* Configuration data corresponding to function personality. Currently, only
+ * supported format is VIRTIO_BLK_CONFIG
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_OFST 40
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_LEN 1
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM 212
+#define       MC_CMD_DESC_PROXY_FUNC_OPEN_OUT_CONFIG_MAXNUM_MCDI2 980
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE
+ * Releases a handle for an open descriptor proxy function. If proxying was
+ * enabled on the device, the caller is expected to gracefully stop it using
+ * MC_CMD_DESC_PROXY_FUNC_DISABLE prior to calling this function. Closing an
+ * active device without disabling proxying will result in a forced close,
+ * which will put the device into a failed state and signal the error to the
+ * host driver (for virtio, the DEVICE_NEEDS_RESET flag would be set on the
+ * host side).
+ */
+#define MC_CMD_DESC_PROXY_FUNC_CLOSE 0x1a1
+#undef MC_CMD_0x1a1_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a1_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_LEN 4
+/* Handle to the descriptor proxy function */
+#define       MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_CLOSE_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_CLOSE_OUT_LEN 0
+
+/* DESC_PROXY_FUNC_MAP structuredef */
+#define    DESC_PROXY_FUNC_MAP_LEN 52
+/* PCIe function ID (as struct PCIE_FUNCTION) */
+#define       DESC_PROXY_FUNC_MAP_FUNC_OFST 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_LEN 8
+#define       DESC_PROXY_FUNC_MAP_FUNC_LO_OFST 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_HI_OFST 4
+#define       DESC_PROXY_FUNC_MAP_FUNC_LBN 0
+#define       DESC_PROXY_FUNC_MAP_FUNC_WIDTH 64
+/* Function personality */
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_OFST 8
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_LEN 4
+/*            Enum values, see field(s): */
+/*               FUNCTION_PERSONALITY/ID */
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_LBN 64
+#define       DESC_PROXY_FUNC_MAP_PERSONALITY_WIDTH 32
+/* User-defined label (zero-terminated ASCII string) to uniquely identify the
+ * function
+ */
+#define       DESC_PROXY_FUNC_MAP_LABEL_OFST 12
+#define       DESC_PROXY_FUNC_MAP_LABEL_LEN 40
+#define       DESC_PROXY_FUNC_MAP_LABEL_LBN 96
+#define       DESC_PROXY_FUNC_MAP_LABEL_WIDTH 320
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENUM
+ * Enumerate existing descriptor proxy functions
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENUM 0x177
+#undef MC_CMD_0x177_PRIVILEGE_CTG
+
+#define MC_CMD_0x177_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENUM_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN 4
+/* Starting index, set to 0 on first request. See
+ * MC_CMD_DESC_PROXY_FUNC_ENUM_OUT/FLAGS.
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_IN_START_IDX_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENUM_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMIN 4
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX 212
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX_MCDI2 992
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LEN(num) (4+52*(num))
+#define    MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(len) (((len)-4)/52)
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FLAGS_LEN 4
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_OFST 0
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN 0
+#define        MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_WIDTH 1
+/* Function map, as array of DESC_PROXY_FUNC_MAP */
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_LEN 52
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MINNUM 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_MAXNUM_MCDI2 19
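+
+/* A minimal sketch of how a caller might walk the function map with the MCDI
+ * helpers used elsewhere in this driver. The continuation semantics assumed
+ * here (restart from the next index while MORE_DATA is set) are inferred from
+ * the field descriptions above rather than stated explicitly:
+ *
+ *	MCDI_DECLARE_BUF(inbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_IN_LEN);
+ *	MCDI_DECLARE_BUF(outbuf, MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_LENMAX);
+ *	unsigned int start = 0;
+ *	size_t outlen;
+ *	int rc;
+ *
+ *	do {
+ *		MCDI_SET_DWORD(inbuf, DESC_PROXY_FUNC_ENUM_IN_START_IDX, start);
+ *		rc = efx_mcdi_rpc(efx, MC_CMD_DESC_PROXY_FUNC_ENUM,
+ *				  inbuf, sizeof(inbuf),
+ *				  outbuf, sizeof(outbuf), &outlen);
+ *		if (rc)
+ *			break;
+ *		start += MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_FUNC_MAP_NUM(outlen);
+ *	} while (MCDI_DWORD(outbuf, DESC_PROXY_FUNC_ENUM_OUT_FLAGS) &
+ *		 (1 << MC_CMD_DESC_PROXY_FUNC_ENUM_OUT_MORE_DATA_LBN));
+ */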
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE
+ * Enable descriptor proxying for a function into a target event queue.
+ * Returns VI allocation info for the proxy source function, so that the caller
+ * can map absolute VI IDs from descriptor proxy events back to the originating
+ * function.
+ */
+#define MC_CMD_DESC_PROXY_FUNC_ENABLE 0x178
+#undef MC_CMD_0x178_PRIVILEGE_CTG
+
+#define MC_CMD_0x178_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_LEN 8
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_HANDLE_LEN 4
+/* Descriptor proxy sink queue (relative to the caller function). Must be an
+ * extended-width event queue.
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_IN_TARGET_EVQ_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_LEN 8
+/* The number of VIs allocated on the function */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_COUNT_LEN 4
+/* The base absolute VI number allocated to the function. */
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_OFST 4
+#define       MC_CMD_DESC_PROXY_FUNC_ENABLE_OUT_VI_BASE_LEN 4
+
+
+/***********************************/
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE
+ * Disable descriptor proxying for a function
+ */
+#define MC_CMD_DESC_PROXY_FUNC_DISABLE 0x179
+#undef MC_CMD_0x179_PRIVILEGE_CTG
+
+#define MC_CMD_0x179_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_IN msgrequest */
+#define    MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_LEN 4
+/* Handle to descriptor proxy function (as returned by
+ * MC_CMD_DESC_PROXY_FUNC_OPEN)
+ */
+#define       MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_OFST 0
+#define       MC_CMD_DESC_PROXY_FUNC_DISABLE_IN_HANDLE_LEN 4
+
+/* MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT msgresponse */
+#define    MC_CMD_DESC_PROXY_FUNC_DISABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_ADDR_SPC_ID
+ * Get Address space identifier for use in mem2mem descriptors for a given
+ * target. See SF-120734-TC for details on ADDR_SPC_IDs and mem2mem
+ * descriptors.
+ */
+#define MC_CMD_GET_ADDR_SPC_ID 0x1a0
+#undef MC_CMD_0x1a0_PRIVILEGE_CTG
+
+#define MC_CMD_0x1a0_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_GET_ADDR_SPC_ID_IN msgrequest */
+#define    MC_CMD_GET_ADDR_SPC_ID_IN_LEN 16
+/* Resource type to get ADDR_SPC_ID for */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_TYPE_LEN 4
+/* enum: Address space ID for host/AP memory DMA over the same interface this
+ * MCDI was called on
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_SELF 0x0
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC 0x1
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC with PASID value specified by PASID
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_PCI_FUNC_PASID 0x2
+/* enum: Address space ID for host/AP memory DMA via PCI interface and function
+ * specified by FUNC with PASID value of relative VI specified by VI
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_REL_VI 0x3
+/* enum: Address space ID for host/AP memory DMA via PCI interface, function
+ * and PASID value of absolute VI specified by VI
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_ABS_VI 0x4
+/* enum: Address space ID for host memory DMA via PCI interface and function of
+ * descriptor proxy function specified by HANDLE
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_DESC_PROXY_HANDLE 0x5
+/* enum: Address space ID for DMA to/from MC memory */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_MC_MEM 0x6
+/* enum: Address space ID for DMA to/from other SmartNIC memory (on-chip, DDR)
+ */
+#define          MC_CMD_GET_ADDR_SPC_ID_IN_NIC_MEM 0x7
+/* PCIe Function ID (as struct PCIE_FUNCTION). Only valid if TYPE is PCI_FUNC,
+ * PCI_FUNC_PASID or REL_VI.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LEN 8
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_LO_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_FUNC_HI_OFST 8
+/* PASID value. Only valid if TYPE is PCI_FUNC_PASID. */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_PASID_OFST 12
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_PASID_LEN 4
+/* Relative or absolute VI number. Only valid if TYPE is REL_VI or ABS_VI */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_VI_OFST 12
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_VI_LEN 4
+/* Descriptor proxy function handle. Only valid if TYPE is DESC_PROXY_HANDLE.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_OFST 4
+#define       MC_CMD_GET_ADDR_SPC_ID_IN_HANDLE_LEN 4
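+
+/* For example (a sketch, not a documented recipe): to obtain the address
+ * space of relative VI 5 on a given PCIe function, set TYPE to REL_VI, FUNC
+ * to that PCIe function and VI to 5; PASID and HANDLE are not used for this
+ * TYPE.
+ */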
+
+/* MC_CMD_GET_ADDR_SPC_ID_OUT msgresponse */
+#define    MC_CMD_GET_ADDR_SPC_ID_OUT_LEN 8
+/* Address Space ID for the requested target. Only the lower 36 bits are valid
+ * in the current SmartNIC implementation.
+ */
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LEN 8
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_LO_OFST 0
+#define       MC_CMD_GET_ADDR_SPC_ID_OUT_ADDR_SPC_ID_HI_OFST 4
+
+
+#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.c b/drivers/net/ethernet/sfc/siena/mcdi_port.c
new file mode 100644
index 000000000000..94c6a345c0b1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
+ */
+
+/*
+ * Driver for PHY related operations via MCDI.
+ */
+
+#include <linux/slab.h>
+#include "efx.h"
+#include "mcdi_port.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "selftest.h"
+#include "mcdi_port_common.h"
+
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+			      int prtad, int devad, u16 addr)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
+}
+
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+			       int prtad, int devad, u16 addr, u16 value)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
+	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+	    MC_CMD_MDIO_STATUS_GOOD)
+		return -EIO;
+
+	return 0;
+}
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+	return phy_data->supported_cap;
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	size_t outlength;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlength);
+	if (rc)
+		return true;
+
+	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+	int rc;
+
+	/* Set up MDIO structure for PHY */
+	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	efx->mdio.mdio_read = efx_mcdi_mdio_read;
+	efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+	/* Fill out MDIO structure, loopback modes, and initial link state */
+	rc = efx_mcdi_phy_probe(efx);
+	if (rc != 0)
+		return rc;
+
+	return efx_mcdi_mac_init_stats(efx);
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+	efx_mcdi_phy_remove(efx);
+	efx_mcdi_mac_fini_stats(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port.h b/drivers/net/ethernet/sfc/siena/mcdi_port.h
new file mode 100644
index 000000000000..07863ddbe740
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_MCDI_PORT_H
+#define EFX_MCDI_PORT_H
+
+#include "net_driver.h"
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+int efx_mcdi_port_probe(struct efx_nic *efx);
+void efx_mcdi_port_remove(struct efx_nic *efx);
+
+#endif /* EFX_MCDI_PORT_H */
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.c b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c
new file mode 100644
index 000000000000..899cc1671004
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.c
@@ -0,0 +1,1301 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "mcdi_port_common.h"
+#include "efx_common.h"
+#include "nic.h"
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0);
+	BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name));
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS);
+	cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE);
+	cfg->supported_cap =
+		MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP);
+	cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL);
+	cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT);
+	cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK);
+	memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME),
+	       sizeof(cfg->name));
+	cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE);
+	cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK);
+	memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION),
+	       sizeof(cfg->revision));
+
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising)
+{
+	memcpy(efx->link_advertising, advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+	efx->link_advertising[0] |= ADVERTISED_Autoneg;
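+	/* The advertised pause bits map onto wanted flow control following
+	 * the usual ethtool convention:
+	 *	Pause	Asym_Pause	wanted_fc
+	 *	  0	    0		none
+	 *	  1	    0		RX | TX
+	 *	  1	    1		RX
+	 *	  0	    1		TX
+	 */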
+	if (advertising[0] & ADVERTISED_Pause)
+		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
+	else
+		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
+	if (advertising[0] & ADVERTISED_Asym_Pause)
+		efx->wanted_fc ^= EFX_FC_TX;
+}
+
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+		      u32 flags, u32 loopback_mode, u32 loopback_speed)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
+
+	BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode);
+	MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed);
+
+	return efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf),
+			  NULL, 0, NULL);
+}
+
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		goto fail;
+
+	if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+		      MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
+		rc = -EIO;
+		goto fail;
+	}
+
+	*loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_OUT_SUGGESTED);
+
+	return 0;
+
+fail:
+	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+	return rc;
+}
+
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
+{
+	#define SET_BIT(name)	__set_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					  linkset)
+
+	bitmap_zero(linkset, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	switch (media) {
+	case MC_CMD_MEDIA_KX4:
+		SET_BIT(Backplane);
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+			SET_BIT(1000baseKX_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+			SET_BIT(10000baseKX4_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+			SET_BIT(40000baseKR4_Full);
+		break;
+
+	case MC_CMD_MEDIA_XFP:
+	case MC_CMD_MEDIA_SFP_PLUS:
+	case MC_CMD_MEDIA_QSFP_PLUS:
+		SET_BIT(FIBRE);
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
+			SET_BIT(1000baseT_Full);
+			SET_BIT(1000baseX_Full);
+		}
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+			SET_BIT(10000baseCR_Full);
+			SET_BIT(10000baseLR_Full);
+			SET_BIT(10000baseSR_Full);
+		}
+		if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
+			SET_BIT(40000baseCR4_Full);
+			SET_BIT(40000baseSR4_Full);
+		}
+		if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
+			SET_BIT(100000baseCR4_Full);
+			SET_BIT(100000baseSR4_Full);
+		}
+		if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
+			SET_BIT(25000baseCR_Full);
+			SET_BIT(25000baseSR_Full);
+		}
+		if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
+			SET_BIT(50000baseCR2_Full);
+		break;
+
+	case MC_CMD_MEDIA_BASE_T:
+		SET_BIT(TP);
+		if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
+			SET_BIT(10baseT_Half);
+		if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN))
+			SET_BIT(10baseT_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN))
+			SET_BIT(100baseT_Half);
+		if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN))
+			SET_BIT(100baseT_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN))
+			SET_BIT(1000baseT_Half);
+		if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+			SET_BIT(1000baseT_Full);
+		if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+			SET_BIT(10000baseT_Full);
+		break;
+	}
+
+	if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+		SET_BIT(Pause);
+	if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		SET_BIT(Asym_Pause);
+	if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		SET_BIT(Autoneg);
+
+	#undef SET_BIT
+}
+
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
+{
+	u32 result = 0;
+
+	#define TEST_BIT(name)	test_bit(ETHTOOL_LINK_MODE_ ## name ## _BIT, \
+					 linkset)
+
+	if (TEST_BIT(10baseT_Half))
+		result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN);
+	if (TEST_BIT(10baseT_Full))
+		result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN);
+	if (TEST_BIT(100baseT_Half))
+		result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN);
+	if (TEST_BIT(100baseT_Full))
+		result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
+	if (TEST_BIT(1000baseT_Half))
+		result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
+	if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+			TEST_BIT(1000baseX_Full))
+		result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
+	if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+			TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+			TEST_BIT(10000baseSR_Full))
+		result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+	if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+			TEST_BIT(40000baseSR4_Full))
+		result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
+	if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
+		result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
+	if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
+		result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
+	if (TEST_BIT(50000baseCR2_Full))
+		result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
+	if (TEST_BIT(Pause))
+		result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
+	if (TEST_BIT(Asym_Pause))
+		result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN);
+	if (TEST_BIT(Autoneg))
+		result |= (1 << MC_CMD_PHY_CAP_AN_LBN);
+
+	#undef TEST_BIT
+
+	return result;
+}
+
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	enum efx_phy_mode mode, supported;
+	u32 flags;
+
+	/* TODO: Advertise the capabilities supported by this PHY */
+	supported = 0;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_TXDIS_LBN))
+		supported |= PHY_MODE_TX_DISABLED;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_LOWPOWER_LBN))
+		supported |= PHY_MODE_LOW_POWER;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_POWEROFF_LBN))
+		supported |= PHY_MODE_OFF;
+
+	mode = efx->phy_mode & supported;
+
+	flags = 0;
+	if (mode & PHY_MODE_TX_DISABLED)
+		flags |= (1 << MC_CMD_SET_LINK_IN_TXDIS_LBN);
+	if (mode & PHY_MODE_LOW_POWER)
+		flags |= (1 << MC_CMD_SET_LINK_IN_LOWPOWER_LBN);
+	if (mode & PHY_MODE_OFF)
+		flags |= (1 << MC_CMD_SET_LINK_IN_POWEROFF_LBN);
+
+	return flags;
+}
+
+u8 mcdi_to_ethtool_media(u32 media)
+{
+	switch (media) {
+	case MC_CMD_MEDIA_XAUI:
+	case MC_CMD_MEDIA_CX4:
+	case MC_CMD_MEDIA_KX4:
+		return PORT_OTHER;
+
+	case MC_CMD_MEDIA_XFP:
+	case MC_CMD_MEDIA_SFP_PLUS:
+	case MC_CMD_MEDIA_QSFP_PLUS:
+		return PORT_FIBRE;
+
+	case MC_CMD_MEDIA_BASE_T:
+		return PORT_TP;
+
+	default:
+		return PORT_OTHER;
+	}
+}
+
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+			      struct efx_link_state *link_state,
+			      u32 speed, u32 flags, u32 fcntl)
+{
+	switch (fcntl) {
+	case MC_CMD_FCNTL_AUTO:
+		WARN_ON(1);	/* This is not a link mode */
+		link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_BIDIR:
+		link_state->fc = EFX_FC_TX | EFX_FC_RX;
+		break;
+	case MC_CMD_FCNTL_RESPOND:
+		link_state->fc = EFX_FC_RX;
+		break;
+	default:
+		WARN_ON(1);
+		fallthrough;
+	case MC_CMD_FCNTL_OFF:
+		link_state->fc = 0;
+		break;
+	}
+
+	link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+	link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+	link_state->speed = speed;
+}
+
+/* The semantics of the ethtool FEC mode bitmask are not well defined,
+ * particularly the meaning of combinations of bits.  Which means we get to
+ * define our own semantics, as follows:
+ * OFF overrides any other bits, and means "disable all FEC" (with the
+ * exception of 25G KR4/CR4, where it is not possible to reject it if AN
+ * partner requests it).
+ * AUTO on its own means use cable requirements and link partner autoneg with
+ * fw-default preferences for the cable type.
+ * AUTO and either RS or BASER means use the specified FEC type if cable and
+ * link partner support it, otherwise autoneg/fw-default.
+ * RS or BASER alone means use the specified FEC type if cable and link partner
+ * support it and either requests it, otherwise no FEC.
+ * Both RS and BASER (whether AUTO or not) means use FEC if cable and link
+ * partner support it, preferring RS to BASER.
+ */
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap)
+{
+	u32 ret = 0;
+
+	if (ethtool_cap & ETHTOOL_FEC_OFF)
+		return 0;
+
+	if (ethtool_cap & ETHTOOL_FEC_AUTO)
+		ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+			(1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+			(1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap;
+	if (ethtool_cap & ETHTOOL_FEC_RS &&
+	    supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
+		ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
+		       (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
+	if (ethtool_cap & ETHTOOL_FEC_BASER) {
+		if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+			ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+			       (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+		if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+			ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+			       (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+	}
+	return ret;
+}
+
+/* Invert ethtool_fec_caps_to_mcdi.  There are two combinations which that
+ * function can never produce, (baser xor rs) with neither req bit set; the
+ * implementation below maps both of those to AUTO.  This should never matter,
+ * and it's not clear what a better mapping would be anyway.
+ */
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g)
+{
+	bool rs = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN),
+	     rs_req = caps & (1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN),
+	     baser = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN)
+			    : caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN),
+	     baser_req = is_25g ? caps & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN)
+				: caps & (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+
+	if (!baser && !rs)
+		return ETHTOOL_FEC_OFF;
+	return (rs_req ? ETHTOOL_FEC_RS : 0) |
+	       (baser_req ? ETHTOOL_FEC_BASER : 0) |
+	       (baser == baser_req && rs == rs_req ? 0 : ETHTOOL_FEC_AUTO);
+}
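+
+/* Worked example of the two mappings above: with RS FEC in supported_cap,
+ * ethtool_fec_caps_to_mcdi(supported_cap, ETHTOOL_FEC_RS) yields
+ * RS_FEC | RS_FEC_REQUESTED, and feeding that back through
+ * mcdi_fec_caps_to_ethtool(caps, false) returns ETHTOOL_FEC_RS again.
+ */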
+
+/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
+ * supported by the link partner. Warn the user if this isn't the case
+ */
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 rmtadv;
+
+	/* The link partner capabilities are only relevant if the
+	 * link supports flow control autonegotiation
+	 */
+	if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		return;
+
+	/* If flow control autoneg is supported and enabled, then fine */
+	if (efx->wanted_fc & EFX_FC_AUTO)
+		return;
+
+	rmtadv = 0;
+	if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN))
+		rmtadv |= ADVERTISED_Pause;
+	if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+		rmtadv |=  ADVERTISED_Asym_Pause;
+
+	if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause)
+		netif_err(efx, link, efx->net_dev,
+			  "warning: link partner doesn't support pause frames");
+}
+
+bool efx_mcdi_phy_poll(struct efx_nic *efx)
+{
+	struct efx_link_state old_state = efx->link_state;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	int rc;
+
+	WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		efx->link_state.up = false;
+	else
+		efx_mcdi_phy_decode_link(
+			efx, &efx->link_state,
+			MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+			MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+			MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+	return !efx_link_state_equal(&efx->link_state, &old_state);
+}
+
+int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	u32 caps;
+	int rc;
+
+	/* Initialise and populate phy_data */
+	phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+	if (phy_data == NULL)
+		return -ENOMEM;
+
+	rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+	if (rc != 0)
+		goto fail;
+
+	/* Read initial link advertisement */
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		goto fail;
+
+	/* Fill out nic state */
+	efx->phy_data = phy_data;
+	efx->phy_type = phy_data->type;
+
+	efx->mdio_bus = phy_data->channel;
+	efx->mdio.prtad = phy_data->port;
+	efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+	efx->mdio.mode_support = 0;
+	if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+		efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+	if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+		efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+	caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+	if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		mcdi_to_ethtool_linkset(phy_data->media, caps,
+					efx->link_advertising);
+	else
+		phy_data->forced_cap = caps;
+
+	/* Assert that we can map efx -> mcdi loopback modes */
+	BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+	BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+	BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+	BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+	BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+	BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+	BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+	BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+	BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+	BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+	BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+	BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+	BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+	BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+	BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+	BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+	BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+	BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+	BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+	BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+	BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+	BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+	BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+	BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+	BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+	rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+	if (rc != 0)
+		goto fail;
+	/* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+	 * but by convention we don't include it in efx->loopback_modes.
+	 */
+	efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+	/* Set the initial link mode */
+	efx_mcdi_phy_decode_link(efx, &efx->link_state,
+				 MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+				 MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+				 MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+	/* Record the initial FEC configuration (or nearest approximation
+	 * representable in the ethtool configuration space)
+	 */
+	efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
+						   efx->link_state.speed == 25000 ||
+						   efx->link_state.speed == 50000);
+
+	/* Default to Autonegotiated flow control if the PHY supports it */
+	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+		efx->wanted_fc |= EFX_FC_AUTO;
+	efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+	return 0;
+
+fail:
+	kfree(phy_data);
+	return rc;
+}
+
+void efx_mcdi_phy_remove(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+	efx->phy_data = NULL;
+	kfree(phy_data);
+}
+
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+	int rc;
+
+	cmd->base.speed = efx->link_state.speed;
+	cmd->base.duplex = efx->link_state.fd;
+	cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+	cmd->base.phy_address = phy_cfg->port;
+	cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
+	cmd->base.mdio_support = (efx->mdio.mode_support &
+			      (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+	mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+				cmd->link_modes.supported);
+	memcpy(cmd->link_modes.advertising, efx->link_advertising,
+	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		return;
+	mcdi_to_ethtool_linkset(phy_cfg->media,
+				MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+				cmd->link_modes.lp_advertising);
+}
+
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 caps;
+	int rc;
+
+	if (cmd->base.autoneg) {
+		caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+			1 << MC_CMD_PHY_CAP_AN_LBN);
+	} else if (cmd->base.duplex) {
+		switch (cmd->base.speed) {
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN;   break;
+		case 10000:  caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN;  break;
+		case 40000:  caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN;  break;
+		case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+		case 25000:  caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN;  break;
+		case 50000:  caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN;  break;
+		default:     return -EINVAL;
+		}
+	} else {
+		switch (cmd->base.speed) {
+		case 10:     caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN;     break;
+		case 100:    caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN;    break;
+		case 1000:   caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN;   break;
+		default:     return -EINVAL;
+		}
+	}
+
+	caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+	rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+			       efx->loopback_mode, 0);
+	if (rc)
+		return rc;
+
+	if (cmd->base.autoneg) {
+		efx_link_set_advertising(efx, cmd->link_modes.advertising);
+		phy_cfg->forced_cap = 0;
+	} else {
+		efx_link_clear_advertising(efx);
+		phy_cfg->forced_cap = caps;
+	}
+	return 0;
+}
+
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
+	u32 caps, active, speed; /* MCDI format */
+	bool is_25g = false;
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+	if (outlen < MC_CMD_GET_LINK_OUT_V2_LEN)
+		return -EOPNOTSUPP;
+
+	/* behaviour for 25G/50G links depends on 25G BASER bit */
+	speed = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_LINK_SPEED);
+	is_25g = speed == 25000 || speed == 50000;
+
+	caps = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_CAP);
+	fec->fec = mcdi_fec_caps_to_ethtool(caps, is_25g);
+	/* BASER is never supported on 100G */
+	if (speed == 100000)
+		fec->fec &= ~ETHTOOL_FEC_BASER;
+
+	active = MCDI_DWORD(outbuf, GET_LINK_OUT_V2_FEC_TYPE);
+	switch (active) {
+	case MC_CMD_FEC_NONE:
+		fec->active_fec = ETHTOOL_FEC_OFF;
+		break;
+	case MC_CMD_FEC_BASER:
+		fec->active_fec = ETHTOOL_FEC_BASER;
+		break;
+	case MC_CMD_FEC_RS:
+		fec->active_fec = ETHTOOL_FEC_RS;
+		break;
+	default:
+		netif_warn(efx, hw, efx->net_dev,
+			   "Firmware reports unrecognised FEC_TYPE %u\n",
+			   active);
+		/* We don't know what firmware has picked.  AUTO is as good a
+		 * "can't happen" value as any other.
+		 */
+		fec->active_fec = ETHTOOL_FEC_AUTO;
+		break;
+	}
+
+	return 0;
+}
+
+/* Basic validation to ensure that the caps we are going to attempt to set are
+ * in fact supported by the adapter.  Note that 'no FEC' is always supported.
+ */
+static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap)
+{
+	if (ethtool_cap & ETHTOOL_FEC_OFF)
+		return 0;
+
+	if (ethtool_cap &&
+	    !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap))
+		return -EINVAL;
+	return 0;
+}
+
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 caps;
+	int rc;
+
+	rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec);
+	if (rc)
+		return rc;
+
+	/* Work out what efx_mcdi_phy_set_link_ksettings() would produce from
+	 * saved advertising bits
+	 */
+	if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
+		caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
+			1 << MC_CMD_PHY_CAP_AN_LBN);
+	else
+		caps = phy_cfg->forced_cap;
+
+	caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec);
+	rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+			       efx->loopback_mode, 0);
+	if (rc)
+		return rc;
+
+	/* Record the new FEC setting for subsequent set_link calls */
+	efx->fec_config = fec->fec;
+	return 0;
+}
+
+int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
+	size_t outlen;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+			  outbuf, sizeof(outbuf), &outlen);
+	if (rc)
+		return rc;
+
+	if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+		return -EIO;
+	if (MCDI_DWORD(outbuf, GET_PHY_STATE_OUT_STATE) != MC_CMD_PHY_STATE_OK)
+		return -EINVAL;
+
+	return 0;
+}
+
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 caps = (efx->link_advertising[0] ?
+		    ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
+		    phy_cfg->forced_cap);
+
+	caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+	return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+				 efx->loopback_mode, 0);
+}
+
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+	"cable.pairA.length",
+	"cable.pairB.length",
+	"cable.pairC.length",
+	"cable.pairD.length",
+	"cable.pairA.status",
+	"cable.pairB.status",
+	"cable.pairC.status",
+	"cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+			 int *results)
+{
+	unsigned int retry, i, count = 0;
+	size_t outlen;
+	u32 status;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+	u8 *ptr;
+	int rc;
+
+	BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+	MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+	rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+			  inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
+	if (rc)
+		goto out;
+
+	/* Wait up to 10s for BIST to finish */
+	for (retry = 0; retry < 100; ++retry) {
+		BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+		rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+				  outbuf, sizeof(outbuf), &outlen);
+		if (rc)
+			goto out;
+
+		status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+		if (status != MC_CMD_POLL_BIST_RUNNING)
+			goto finished;
+
+		msleep(100);
+	}
+
+	rc = -ETIMEDOUT;
+	goto out;
+
+finished:
+	results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+	/* SFT9001 specific cable diagnostics output */
+	if (efx->phy_type == PHY_TYPE_SFT9001B &&
+	    (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+	     bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+		ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+		if (status == MC_CMD_POLL_BIST_PASSED &&
+		    outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+			for (i = 0; i < 8; i++) {
+				results[count + i] =
+					EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+							EFX_DWORD_0);
+			}
+		}
+		count += 8;
+	}
+	rc = count;
+
+out:
+	return rc;
+}
+
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+	u32 mode;
+	int rc;
+
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+		rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+		if (rc < 0)
+			return rc;
+
+		results += rc;
+	}
+
+	/* If we support both LONG and SHORT, choose between them based on
+	 * whether an offline (disruptive) test was requested. Otherwise, run
+	 * the one we support.
+	 */
+	mode = 0;
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+		if ((flags & ETH_TEST_FL_OFFLINE) &&
+		    (phy_cfg->flags &
+		     (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+			mode = MC_CMD_PHY_BIST_CABLE_LONG;
+		else
+			mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+	} else if (phy_cfg->flags &
+		   (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+		mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+	if (mode != 0) {
+		rc = efx_mcdi_bist(efx, mode, results);
+		if (rc < 0)
+			return rc;
+		results += rc;
+	}
+
+	return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+	struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+	if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+		if (index == 0)
+			return "bist";
+		--index;
+	}
+
+	if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+			      (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
+		if (index == 0)
+			return "cable";
+		--index;
+
+		if (efx->phy_type == PHY_TYPE_SFT9001B) {
+			if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+				return mcdi_sft9001_cable_diag_names[index];
+			index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+		}
+	}
+
+	return NULL;
+}
+
+#define SFP_PAGE_SIZE		128
+#define SFF_DIAG_TYPE_OFFSET	92
+#define SFF_DIAG_ADDR_CHANGE	BIT(2)
+#define SFF_8079_NUM_PAGES	2
+#define SFF_8472_NUM_PAGES	4
+#define SFF_8436_NUM_PAGES	5
+#define SFF_DMT_LEVEL_OFFSET	94
+
+/**
+ * efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
+ * @efx:	NIC context
+ * @page:	EEPROM page number
+ * @data:	Destination data pointer
+ * @offset:	Offset within the page at which to start copying into @data
+ * @space:	Space available in @data
+ *
+ * Return:
+ *   >=0 - amount of data copied
+ *   <0  - error
+ */
+static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
+					       unsigned int page,
+					       u8 *data, ssize_t offset,
+					       ssize_t space)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+	unsigned int payload_len;
+	unsigned int to_copy;
+	size_t outlen;
+	int rc;
+
+	if (offset > SFP_PAGE_SIZE)
+		return -EINVAL;
+
+	to_copy = min(space, SFP_PAGE_SIZE - offset);
+
+	MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+				inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf),
+				&outlen);
+
+	if (rc)
+		return rc;
+
+	if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+			SFP_PAGE_SIZE))
+		return -EIO;
+
+	payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
+	if (payload_len != SFP_PAGE_SIZE)
+		return -EIO;
+
+	memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+	       to_copy);
+
+	return to_copy;
+}
+
+static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
+					       unsigned int page,
+					       u8 byte)
+{
+	u8 data;
+	int rc;
+
+	rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
+	if (rc == 1)
+		return data;
+
+	return rc;
+}
+
+static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
+{
+	/* Page zero of the EEPROM includes the diagnostic type at byte 92. */
+	return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+						   SFF_DIAG_TYPE_OFFSET);
+}
+
+static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
+{
+	/* Page zero of the EEPROM includes the DMT level at byte 94. */
+	return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+						   SFF_DMT_LEVEL_OFFSET);
+}
+
+static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+{
+	struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+	if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
+		return phy_data->media;
+
+	/* A QSFP+ NIC may actually have an SFP+ module attached.
+	 * The ID is page 0, byte 0.
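+	 * (These are SFF-8024 Identifier values: 0x03 is SFP/SFP+, 0x0c is
+	 * QSFP and 0x0d is QSFP+.)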
+	 */
+	switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+	case 0x3:
+		return MC_CMD_MEDIA_SFP_PLUS;
+	case 0xc:
+	case 0xd:
+		return MC_CMD_MEDIA_QSFP_PLUS;
+	default:
+		return 0;
+	}
+}
+
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data)
+{
+	int rc;
+	ssize_t space_remaining = ee->len;
+	unsigned int page_off;
+	bool ignore_missing;
+	int num_pages;
+	int page;
+
+	switch (efx_mcdi_phy_module_type(efx)) {
+	case MC_CMD_MEDIA_SFP_PLUS:
+		num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
+				SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
+		page = 0;
+		ignore_missing = false;
+		break;
+	case MC_CMD_MEDIA_QSFP_PLUS:
+		num_pages = SFF_8436_NUM_PAGES;
+		page = -1; /* We obtain the lower page by asking for -1. */
+		ignore_missing = true; /* Ignore missing pages after page 0. */
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	page_off = ee->offset % SFP_PAGE_SIZE;
+	page += ee->offset / SFP_PAGE_SIZE;
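+	/* e.g. for an SFP+ module (page starts at 0), a request with
+	 * ee->offset == 200 begins at byte 72 of page 1.
+	 */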
+
+	while (space_remaining && (page < num_pages)) {
+		rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
+							 data, page_off,
+							 space_remaining);
+
+		if (rc > 0) {
+			space_remaining -= rc;
+			data += rc;
+			page_off = 0;
+			page++;
+		} else if (rc == 0) {
+			space_remaining = 0;
+		} else if (ignore_missing && (page > 0)) {
+			int intended_size = SFP_PAGE_SIZE - page_off;
+
+			space_remaining -= intended_size;
+			if (space_remaining < 0) {
+				space_remaining = 0;
+			} else {
+				memset(data, 0, intended_size);
+				data += intended_size;
+				page_off = 0;
+				page++;
+				rc = 0;
+			}
+		} else {
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo)
+{
+	int sff_8472_level;
+	int diag_type;
+
+	switch (efx_mcdi_phy_module_type(efx)) {
+	case MC_CMD_MEDIA_SFP_PLUS:
+		sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
+
+		/* If we can't read the diagnostics level we have none. */
+		if (sff_8472_level < 0)
+			return -EOPNOTSUPP;
+
+		/* Check if this module requires the (unsupported) address
+		 * change operation.
+		 */
+		diag_type = efx_mcdi_phy_diag_type(efx);
+
+		if (sff_8472_level == 0 ||
+		    (diag_type & SFF_DIAG_ADDR_CHANGE)) {
+			modinfo->type = ETH_MODULE_SFF_8079;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+		} else {
+			modinfo->type = ETH_MODULE_SFF_8472;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		}
+		break;
+
+	case MC_CMD_MEDIA_QSFP_PLUS:
+		modinfo->type = ETH_MODULE_SFF_8436;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
+{
+	return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+	u32 fcntl;
+	MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+	BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+	/* This has no effect on EF10 */
+	ether_addr_copy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+			efx->net_dev->dev_addr);
+
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU, efx_calc_mac_mtu(efx));
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+	/* Set simple MAC filter for Siena */
+	MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+			      SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+	MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_FLAGS,
+			      SET_MAC_IN_FLAG_INCLUDE_FCS,
+			      !!(efx->net_dev->features & NETIF_F_RXFCS));
+
+	switch (efx->wanted_fc) {
+	case EFX_FC_RX | EFX_FC_TX:
+		fcntl = MC_CMD_FCNTL_BIDIR;
+		break;
+	case EFX_FC_RX:
+		fcntl = MC_CMD_FCNTL_RESPOND;
+		break;
+	default:
+		fcntl = MC_CMD_FCNTL_OFF;
+		break;
+	}
+	if (efx->wanted_fc & EFX_FC_AUTO)
+		fcntl = MC_CMD_FCNTL_AUTO;
+	if (efx->fc_disable)
+		fcntl = MC_CMD_FCNTL_OFF;
+
+	MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+			    NULL, 0, NULL);
+}
+
+int efx_mcdi_set_mtu(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);
+
+	BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, efx_calc_mac_mtu(efx));
+
+	MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
+			      SET_MAC_EXT_IN_CFG_MTU, 1);
+
+	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
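+/* MAC statistics collection modes: ENABLE starts a periodic (1000 ms) DMA of
+ * the statistics buffer, DISABLE stops it, and PULL requests an immediate
+ * one-shot update without changing the periodic state.
+ */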
+enum efx_stats_action {
+	EFX_STATS_ENABLE,
+	EFX_STATS_DISABLE,
+	EFX_STATS_PULL,
+};
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx,
+			      enum efx_stats_action action, int clear)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+	int rc;
+	int change = action == EFX_STATS_PULL ? 0 : 1;
+	int enable = action == EFX_STATS_ENABLE ? 1 : 0;
+	int period = action == EFX_STATS_ENABLE ? 1000 : 0;
+	dma_addr_t dma_addr = efx->stats_buffer.dma_addr;
+	u32 dma_len = action != EFX_STATS_DISABLE ?
+		efx->num_mac_stats * sizeof(u64) : 0;
+
+	BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+	MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+	MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+			      MAC_STATS_IN_DMA, !!enable,
+			      MAC_STATS_IN_CLEAR, clear,
+			      MAC_STATS_IN_PERIODIC_CHANGE, change,
+			      MAC_STATS_IN_PERIODIC_ENABLE, enable,
+			      MAC_STATS_IN_PERIODIC_CLEAR, 0,
+			      MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+			      MAC_STATS_IN_PERIOD_MS, period);
+	MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+		MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, efx->vport_id);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+				NULL, 0, NULL);
+	/* Expect ENOENT if DMA queues have not been set up */
+	if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+		efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+				       NULL, 0, rc);
+	return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+	__le64 *dma_stats = efx->stats_buffer.addr;
+
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+
+	efx_mcdi_mac_stats(efx, EFX_STATS_ENABLE, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+	efx_mcdi_mac_stats(efx, EFX_STATS_DISABLE, 0);
+}
+
+#define EFX_MAC_STATS_WAIT_US 100
+#define EFX_MAC_STATS_WAIT_ATTEMPTS 10
+
+void efx_mcdi_mac_pull_stats(struct efx_nic *efx)
+{
+	__le64 *dma_stats = efx->stats_buffer.addr;
+	int attempts = EFX_MAC_STATS_WAIT_ATTEMPTS;
+
+	dma_stats[efx->num_mac_stats - 1] = EFX_MC_STATS_GENERATION_INVALID;
+	efx_mcdi_mac_stats(efx, EFX_STATS_PULL, 0);
+
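+	/* The MC rewrites the generation count in the last word of the
+	 * stats buffer when the DMA completes, so poll it for up to ~1 ms
+	 * (10 attempts x 100 us).
+	 */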
+	while (dma_stats[efx->num_mac_stats - 1] ==
+				EFX_MC_STATS_GENERATION_INVALID &&
+			attempts-- != 0)
+		udelay(EFX_MAC_STATS_WAIT_US);
+}
+
+int efx_mcdi_mac_init_stats(struct efx_nic *efx)
+{
+	int rc;
+
+	if (!efx->num_mac_stats)
+		return 0;
+
+	/* Allocate buffer for stats */
+	rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+				  efx->num_mac_stats * sizeof(u64), GFP_KERNEL);
+	if (rc) {
+		netif_warn(efx, probe, efx->net_dev,
+			   "failed to allocate DMA buffer: %d\n", rc);
+		return rc;
+	}
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "stats buffer at %llx (virt %p phys %llx)\n",
+		  (u64) efx->stats_buffer.dma_addr,
+		  efx->stats_buffer.addr,
+		  (u64) virt_to_phys(efx->stats_buffer.addr));
+
+	return 0;
+}
+
+void efx_mcdi_mac_fini_stats(struct efx_nic *efx)
+{
+	efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena it is the same as the PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+	int rc;
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		return rc;
+
+	return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
+
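+/* Translation table from MCDI LINKCHANGE speed codes to link speed in Mbps. */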
+static unsigned int efx_mcdi_event_link_speed[] = {
+	[MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+	[MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_40G] = 40000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_25G] = 25000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_50G] = 50000,
+	[MCDI_EVENT_LINKCHANGE_SPEED_100G] = 100000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+	u32 flags, fcntl, speed, lpa;
+
+	speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+	EFX_WARN_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+	speed = efx_mcdi_event_link_speed[speed];
+
+	flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+	fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+	lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+	/* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+	 * which is only run after flushing the event queues. Therefore, it
+	 * is safe to modify the link state outside of the mac_lock here.
+	 */
+	efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+	efx_mcdi_phy_check_fcntl(efx, lpa);
+
+	efx_link_status_changed(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/mcdi_port_common.h b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h
new file mode 100644
index 000000000000..ed31690e591c
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mcdi_port_common.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+#ifndef EFX_MCDI_PORT_COMMON_H
+#define EFX_MCDI_PORT_COMMON_H
+
+#include "net_driver.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+
+struct efx_mcdi_phy_data {
+	u32 flags;
+	u32 type;
+	u32 supported_cap;
+	u32 channel;
+	u32 port;
+	u32 stats_mask;
+	u8 name[20];
+	u32 media;
+	u32 mmd_mask;
+	u8 revision[20];
+	u32 forced_cap;
+};
+
+int efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg);
+void efx_link_set_advertising(struct efx_nic *efx,
+			      const unsigned long *advertising);
+int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
+		      u32 flags, u32 loopback_mode, u32 loopback_speed);
+int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes);
+void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset);
+u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset);
+u32 efx_get_mcdi_phy_flags(struct efx_nic *efx);
+u8 mcdi_to_ethtool_media(u32 media);
+void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+			      struct efx_link_state *link_state,
+			      u32 speed, u32 flags, u32 fcntl);
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap);
+u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
+void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
+bool efx_mcdi_phy_poll(struct efx_nic *efx);
+int efx_mcdi_phy_probe(struct efx_nic *efx);
+void efx_mcdi_phy_remove(struct efx_nic *efx);
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec);
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec);
+int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags);
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index);
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data);
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo);
+int efx_mcdi_set_mac(struct efx_nic *efx);
+int efx_mcdi_set_mtu(struct efx_nic *efx);
+int efx_mcdi_mac_init_stats(struct efx_nic *efx);
+void efx_mcdi_mac_fini_stats(struct efx_nic *efx);
+int efx_mcdi_port_get_number(struct efx_nic *efx);
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/mtd.c b/drivers/net/ethernet/sfc/siena/mtd.c
new file mode 100644
index 000000000000..273c08e5455f
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/mtd.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/slab.h>
+#include <linux/rtnetlink.h>
+
+#include "net_driver.h"
+#include "efx.h"
+
+#define to_efx_mtd_partition(mtd)				\
+	container_of(mtd, struct efx_mtd_partition, mtd)
+
+/* MTD interface */
+
+static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
+{
+	struct efx_nic *efx = mtd->priv;
+
+	return efx->type->mtd_erase(mtd, erase->addr, erase->len);
+}
+
+static void efx_mtd_sync(struct mtd_info *mtd)
+{
+	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
+	struct efx_nic *efx = mtd->priv;
+	int rc;
+
+	rc = efx->type->mtd_sync(mtd);
+	if (rc)
+		pr_err("%s: %s sync failed (%d)\n",
+		       part->name, part->dev_type_name, rc);
+}
+
+static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
+{
+	int rc;
+
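+	/* mtd_device_unregister() returns -EBUSY while userspace still has
+	 * the partition open, so retry once a second until it succeeds.
+	 */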
+	for (;;) {
+		rc = mtd_device_unregister(&part->mtd);
+		if (rc != -EBUSY)
+			break;
+		ssleep(1);
+	}
+	WARN_ON(rc);
+	list_del(&part->node);
+}
+
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+		size_t n_parts, size_t sizeof_part)
+{
+	struct efx_mtd_partition *part;
+	size_t i;
+
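+	/* @parts is an array of driver-specific structures that embed
+	 * struct efx_mtd_partition as their first member, so step through
+	 * it using the caller-supplied element size rather than
+	 * sizeof(*parts).
+	 */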
+	for (i = 0; i < n_parts; i++) {
+		part = (struct efx_mtd_partition *)((char *)parts +
+						    i * sizeof_part);
+
+		part->mtd.writesize = 1;
+
+		if (!(part->mtd.flags & MTD_NO_ERASE))
+			part->mtd.flags |= MTD_WRITEABLE;
+
+		part->mtd.owner = THIS_MODULE;
+		part->mtd.priv = efx;
+		part->mtd.name = part->name;
+		part->mtd._erase = efx_mtd_erase;
+		part->mtd._read = efx->type->mtd_read;
+		part->mtd._write = efx->type->mtd_write;
+		part->mtd._sync = efx_mtd_sync;
+
+		efx->type->mtd_rename(part);
+
+		if (mtd_device_register(&part->mtd, NULL, 0))
+			goto fail;
+
+		/* Add to list in order - efx_mtd_remove() depends on this */
+		list_add_tail(&part->node, &efx->mtd_list);
+	}
+
+	return 0;
+
+fail:
+	while (i--) {
+		part = (struct efx_mtd_partition *)((char *)parts +
+						    i * sizeof_part);
+		efx_mtd_remove_partition(part);
+	}
+	/* Failure is unlikely here, but probably means we're out of memory */
+	return -ENOMEM;
+}
+
+void efx_mtd_remove(struct efx_nic *efx)
+{
+	struct efx_mtd_partition *parts, *part, *next;
+
+	WARN_ON(efx_dev_registered(efx));
+
+	if (list_empty(&efx->mtd_list))
+		return;
+
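+	/* The partitions were allocated as a single block by the caller of
+	 * efx_mtd_add(), so freeing the first entry releases all of them.
+	 */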
+	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+				 node);
+
+	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+		efx_mtd_remove_partition(part);
+
+	kfree(parts);
+}
+
+void efx_mtd_rename(struct efx_nic *efx)
+{
+	struct efx_mtd_partition *part;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(part, &efx->mtd_list, node)
+		efx->type->mtd_rename(part);
+}
diff --git a/drivers/net/ethernet/sfc/siena/net_driver.h b/drivers/net/ethernet/sfc/siena/net_driver.h
new file mode 100644
index 000000000000..318db906a154
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/net_driver.h
@@ -0,0 +1,1716 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mdio.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
+#include <net/xdp.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "filter.h"
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+
+#ifdef DEBUG
+#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_WARN_ON_ONCE_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32U
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+#define EFX_EXTRA_CHANNEL_IOV	0
+#define EFX_EXTRA_CHANNEL_PTP	1
+#define EFX_MAX_EXTRA_CHANNELS	2U
+
+/* Checksum generation is a per-queue option in hardware, so each
+ * queue visible to the networking core is backed by two hardware TX
+ * queues. */
+#define EFX_MAX_TX_TC		2
+#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OUTER_CSUM	1	/* Outer checksum offload */
+#define EFX_TXQ_TYPE_INNER_CSUM	2	/* Inner checksum offload */
+#define EFX_TXQ_TYPE_HIGHPRI	4	/* High-priority (for TC) */
+#define EFX_TXQ_TYPES		8
+/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_MAX_TXQ_PER_CHANNEL	4
+#define EFX_MAX_TX_QUEUES	(EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
+
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Minimum MTU, from RFC791 (IP) */
+#define EFX_MIN_MTU 68
+
+/* Maximum total header length for TSOv2 */
+#define EFX_TSO2_MAX_HDRLEN	208
+
+/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
+ * and should be a multiple of the cache line size.
+ */
+#define EFX_RX_USR_BUF_SIZE	(2048 - 256)
+
+/* If possible, we should ensure cache line alignment at start and end
+ * of every buffer.  Otherwise, we just need to ensure 4-byte
+ * alignment of the network header.
+ */
+#if NET_IP_ALIGN == 0
+#define EFX_RX_BUF_ALIGNMENT	L1_CACHE_BYTES
+#else
+#define EFX_RX_BUF_ALIGNMENT	4
+#endif
+
+/* Non-standard XDP_PACKET_HEADROOM and tailroom to satisfy XDP_REDIRECT and
+ * still fit two standard MTU size packets into a single 4K page.
+ */
+#define EFX_XDP_HEADROOM	128
+#define EFX_XDP_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+/* Forward declare Precision Time Protocol (PTP) support structure. */
+struct efx_ptp_data;
+struct hwtstamp_config;
+
+struct efx_self_tests;
+
+/**
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+	void *addr;
+	dma_addr_t dma_addr;
+	unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that).  On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves.  On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+	struct efx_buffer buf;
+	unsigned int index;
+	unsigned int entries;
+};
+
+/**
+ * struct efx_tx_buffer - buffer state for a TX descriptor
+ * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
+ *	freed when descriptor completes
+ * @xdpf: When @flags & %EFX_TX_BUF_XDP, the XDP frame information; its @data
+ *	member is the associated buffer to drop a page reference on.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, an EF10-specific option
+ *	descriptor.
+ * @dma_addr: DMA address of the fragment.
+ * @flags: Flags for allocation and DMA mapping type
+ * @len: Length of this fragment.
+ *	This field is zero when the queue slot is empty.
+ * @unmap_len: Length of this fragment to unmap
+ * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
+ * Only valid if @unmap_len != 0.
+ */
+struct efx_tx_buffer {
+	union {
+		const struct sk_buff *skb;
+		struct xdp_frame *xdpf;
+	};
+	union {
+		efx_qword_t option;    /* EF10 */
+		dma_addr_t dma_addr;
+	};
+	unsigned short flags;
+	unsigned short len;
+	unsigned short unmap_len;
+	unsigned short dma_offset;
+};
+#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
+#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
+#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION	0x10	/* empty buffer for option descriptor */
+#define EFX_TX_BUF_XDP		0x20	/* buffer was sent with XDP */
+#define EFX_TX_BUF_TSO_V3	0x40	/* empty buffer for a TSO_V3 descriptor */
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path.  There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @label: Label for TX completion events.
+ *	Is our index within @channel->tx_queue array.
+ * @type: configuration type of this TX queue.  A bitmask of %EFX_TXQ_TYPE_* flags.
+ * @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
+ * @channel: The associated channel
+ * @core_txq: The networking core TX queue structure
+ * @buffer: The software buffer ring
+ * @cb_page: Array of pages of copy buffers.  Carved up according to
+ *	%EFX_TX_CB_ORDER into %EFX_TX_CB_SIZE-sized chunks.
+ * @txd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @piobuf: PIO buffer region for this TX queue (shared with its partner).
+ *	Size of the region is efx_piobuf_size.
+ * @piobuf_offset: Buffer offset to be specified in PIO descriptors
+ * @initialised: Has hardware queue been initialised?
+ * @timestamping: Is timestamping enabled for this channel?
+ * @xdp_tx: Is this an XDP tx queue?
+ * @read_count: Current read pointer.
+ *	This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ *	This is here for performance reasons.  The xmit path will
+ *	only get the up-to-date value of @write_count if this
+ *	variable indicates that the queue is empty.  This is to
+ *	avoid cache-line ping-pong between the xmit path and the
+ *	completion path.
+ * @merge_events: Number of TX merged completion events
+ * @completed_timestamp_major: Top part of the most recent tx timestamp.
+ * @completed_timestamp_minor: Low part of the most recent tx timestamp.
+ * @insert_count: Current insert pointer
+ *	This is the number of buffers that have been added to the
+ *	software ring.
+ * @write_count: Current write pointer
+ *	This is the number of buffers that have been added to the
+ *	hardware ring.
+ * @packet_write_count: Completable write pointer
+ *	This is the write pointer of the last packet written.
+ *	Normally this will equal @write_count, but as option descriptors
+ *	don't produce completion events, they won't update this.
+ *	Filled in iff @efx->type->option_descriptors; only used for PIO.
+ *	Thus, this is written and used on EF10, and neither written nor
+ *	used on farch.
+ * @old_read_count: The value of read_count when last checked.
+ *	This is here for performance reasons.  The xmit path will
+ *	only get the up-to-date value of read_count if this
+ *	variable indicates that the queue is full.  This is to
+ *	avoid cache-line ping-pong between the xmit path and the
+ *	completion path.
+ * @tso_bursts: Number of times TSO xmit invoked by kernel
+ * @tso_long_headers: Number of packets with headers too long for standard
+ *	blocks
+ * @tso_packets: Number of packets via the TSO xmit path
+ * @tso_fallbacks: Number of times TSO fallback used
+ * @pushes: Number of times the TX push feature has been used
+ * @pio_packets: Number of times the TX PIO feature has been used
+ * @xmit_pending: Are any packets waiting to be pushed to the NIC
+ * @cb_packets: Number of times the TX copybreak feature has been used
+ * @notify_count: Count of notified descriptors to the NIC
+ * @empty_read_count: If the completion path has seen the queue as empty
+ *	and the transmission path has not yet checked this, the value of
+ *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
+ */
+struct efx_tx_queue {
+	/* Members which don't change on the fast path */
+	struct efx_nic *efx ____cacheline_aligned_in_smp;
+	unsigned int queue;
+	unsigned int label;
+	unsigned int type;
+	unsigned int tso_version;
+	bool tso_encap;
+	struct efx_channel *channel;
+	struct netdev_queue *core_txq;
+	struct efx_tx_buffer *buffer;
+	struct efx_buffer *cb_page;
+	struct efx_special_buffer txd;
+	unsigned int ptr_mask;
+	void __iomem *piobuf;
+	unsigned int piobuf_offset;
+	bool initialised;
+	bool timestamping;
+	bool xdp_tx;
+
+	/* Members used mainly on the completion path */
+	unsigned int read_count ____cacheline_aligned_in_smp;
+	unsigned int old_write_count;
+	unsigned int merge_events;
+	unsigned int bytes_compl;
+	unsigned int pkts_compl;
+	u32 completed_timestamp_major;
+	u32 completed_timestamp_minor;
+
+	/* Members used only on the xmit path */
+	unsigned int insert_count ____cacheline_aligned_in_smp;
+	unsigned int write_count;
+	unsigned int packet_write_count;
+	unsigned int old_read_count;
+	unsigned int tso_bursts;
+	unsigned int tso_long_headers;
+	unsigned int tso_packets;
+	unsigned int tso_fallbacks;
+	unsigned int pushes;
+	unsigned int pio_packets;
+	bool xmit_pending;
+	unsigned int cb_packets;
+	unsigned int notify_count;
+	/* Statistics to supplement MAC stats */
+	unsigned long tx_packets;
+
+	/* Members shared between paths and sometimes updated */
+	unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
+	atomic_t flush_outstanding;
+};
+
+#define EFX_TX_CB_ORDER	7
+#define EFX_TX_CB_SIZE	((1 << EFX_TX_CB_ORDER) - NET_IP_ALIGN)
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @page: The associated page buffer.
+ *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ *	If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ *	If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state.  These are only set on the
+ *	first buffer of a scattered packet.
+ */
+struct efx_rx_buffer {
+	dma_addr_t dma_addr;
+	struct page *page;
+	u16 page_offset;
+	u16 len;
+	u16 flags;
+};
+#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
+#define EFX_RX_PKT_CSUMMED	0x0002
+#define EFX_RX_PKT_DISCARD	0x0004
+#define EFX_RX_PKT_TCP		0x0040
+#define EFX_RX_PKT_PREFIX_LEN	0x0080	/* length is in prefix only */
+#define EFX_RX_PKT_CSUM_LEVEL	0x0200
+
+/**
+ * struct efx_rx_page_state - Page-based rx buffer state
+ *
+ * Inserted at the start of every page allocated for receive buffers.
+ * Used to facilitate sharing dma mappings between recycled rx buffers
+ * and those passed up to the kernel.
+ *
+ * @dma_addr: The dma address of this page.
+ */
+struct efx_rx_page_state {
+	dma_addr_t dma_addr;
+
+	unsigned int __pad[] ____cacheline_aligned;
+};
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @core_index:  Index of network core RX queue.  Will be >= 0 iff this
+ *	is associated with a real RX queue.
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @ptr_mask: The size of the ring minus 1.
+ * @refill_enabled: Enable refill whenever fill level is low
+ * @flush_pending: Set when an RX flush is pending. Has the same lifetime as
+ *	@rxq_flush_pending.
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ *      the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ *      recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ *	(<= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ *	This records the minimum fill level observed when a ring
+ *	refill was triggered.
+ * @recycle_count: RX buffer recycle counter.
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
+ * @xdp_rxq_info: XDP specific RX queue information.
+ * @xdp_rxq_info_valid: Is xdp_rxq_info valid data?
+ */
+struct efx_rx_queue {
+	struct efx_nic *efx;
+	int core_index;
+	struct efx_rx_buffer *buffer;
+	struct efx_special_buffer rxd;
+	unsigned int ptr_mask;
+	bool refill_enabled;
+	bool flush_pending;
+
+	unsigned int added_count;
+	unsigned int notified_count;
+	unsigned int removed_count;
+	unsigned int scatter_n;
+	unsigned int scatter_len;
+	struct page **page_ring;
+	unsigned int page_add;
+	unsigned int page_remove;
+	unsigned int page_recycle_count;
+	unsigned int page_recycle_failed;
+	unsigned int page_recycle_full;
+	unsigned int page_ptr_mask;
+	unsigned int max_fill;
+	unsigned int fast_fill_trigger;
+	unsigned int min_fill;
+	unsigned int min_overfill;
+	unsigned int recycle_count;
+	struct timer_list slow_fill;
+	unsigned int slow_fill_count;
+	/* Statistics to supplement MAC stats */
+	unsigned long rx_packets;
+	struct xdp_rxq_info xdp_rxq_info;
+	bool xdp_rxq_info_valid;
+};
+
+enum efx_sync_events_state {
+	SYNC_EVENTS_DISABLED = 0,
+	SYNC_EVENTS_QUIESCENT,
+	SYNC_EVENTS_REQUESTED,
+	SYNC_EVENTS_VALID,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @channel: Channel instance number
+ * @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @irq_moderation_us: IRQ moderation value (in microseconds)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
+ * @eventq: Event queue buffer
+ * @eventq_mask: Event queue pointer mask
+ * @eventq_read_ptr: Event queue read pointer
+ * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
+ * @irq_count: Number of IRQs since last adaptive moderation decision
+ * @irq_mod_score: IRQ moderation score
+ * @rfs_filter_count: number of accelerated RFS filters currently in place;
+ *	equals the count of @rps_flow_id slots filled
+ * @rfs_last_expiry: value of jiffies last time some accelerated RFS filters
+ *	were checked for expiry
+ * @rfs_expire_index: next accelerated RFS filter ID to check for expiry
+ * @n_rfs_succeeded: number of successful accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
+ * @filter_work: Work item for efx_filter_rfs_expire()
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ *      indexed by filter ID
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_mcast_mismatch: Count of unmatched multicast frames
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ *	lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
+ * @n_rx_xdp_drops: Count of RX packets intentionally dropped due to XDP
+ * @n_rx_xdp_bad_drops: Count of RX packets dropped due to XDP errors
+ * @n_rx_xdp_tx: Count of RX packets retransmitted due to XDP
+ * @n_rx_xdp_redirect: Count of RX packets redirected to a different NIC by XDP
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ *	__efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
+ * @rx_list: list of SKBs from current RX, awaiting processing
+ * @rx_queue: RX queue for this channel
+ * @tx_queue: TX queues for this channel
+ * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type
+ * @sync_events_state: Current state of sync events on this channel
+ * @sync_timestamp_major: Major part of the last ptp sync event
+ * @sync_timestamp_minor: Minor part of the last ptp sync event
+ */
+struct efx_channel {
+	struct efx_nic *efx;
+	int channel;
+	const struct efx_channel_type *type;
+	bool eventq_init;
+	bool enabled;
+	int irq;
+	unsigned int irq_moderation_us;
+	struct net_device *napi_dev;
+	struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned long busy_poll_state;
+#endif
+	struct efx_special_buffer eventq;
+	unsigned int eventq_mask;
+	unsigned int eventq_read_ptr;
+	int event_test_cpu;
+
+	unsigned int irq_count;
+	unsigned int irq_mod_score;
+#ifdef CONFIG_RFS_ACCEL
+	unsigned int rfs_filter_count;
+	unsigned int rfs_last_expiry;
+	unsigned int rfs_expire_index;
+	unsigned int n_rfs_succeeded;
+	unsigned int n_rfs_failed;
+	struct delayed_work filter_work;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+	u32 *rps_flow_id;
+#endif
+
+	unsigned int n_rx_tobe_disc;
+	unsigned int n_rx_ip_hdr_chksum_err;
+	unsigned int n_rx_tcp_udp_chksum_err;
+	unsigned int n_rx_outer_ip_hdr_chksum_err;
+	unsigned int n_rx_outer_tcp_udp_chksum_err;
+	unsigned int n_rx_inner_ip_hdr_chksum_err;
+	unsigned int n_rx_inner_tcp_udp_chksum_err;
+	unsigned int n_rx_eth_crc_err;
+	unsigned int n_rx_mcast_mismatch;
+	unsigned int n_rx_frm_trunc;
+	unsigned int n_rx_overlength;
+	unsigned int n_skbuff_leaks;
+	unsigned int n_rx_nodesc_trunc;
+	unsigned int n_rx_merge_events;
+	unsigned int n_rx_merge_packets;
+	unsigned int n_rx_xdp_drops;
+	unsigned int n_rx_xdp_bad_drops;
+	unsigned int n_rx_xdp_tx;
+	unsigned int n_rx_xdp_redirect;
+
+	unsigned int rx_pkt_n_frags;
+	unsigned int rx_pkt_index;
+
+	struct list_head *rx_list;
+
+	struct efx_rx_queue rx_queue;
+	struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
+	struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES];
+
+	enum efx_sync_events_state sync_events_state;
+	u32 sync_timestamp_major;
+	u32 sync_timestamp_minor;
+};
+
+/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+	struct efx_nic *efx;
+	unsigned int index;
+	char name[IFNAMSIZ + 6];
+};
+
+/**
+ * struct efx_channel_type - distinguishes traffic and extra channels
+ * @handle_no_channel: Handle failure to allocate an extra channel
+ * @pre_probe: Set up extra state prior to initialisation
+ * @post_remove: Tear down extra state after finalisation, if allocated.
+ *	May be called on channels that have not been probed.
+ * @get_name: Generate the channel's name (used for its IRQ handler)
+ * @copy: Copy the channel state prior to reallocation.  May be %NULL if
+ *	reallocation is not supported.
+ * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
+ * @want_txqs: Determine whether this channel should have TX queues
+ *	created.  If %NULL, TX queues are not created.
+ * @keep_eventq: Flag for whether event queue should be kept initialised
+ *	while the device is stopped
+ * @want_pio: Flag for whether PIO buffers should be linked to this
+ *	channel's TX queues.
+ */
+struct efx_channel_type {
+	void (*handle_no_channel)(struct efx_nic *);
+	int (*pre_probe)(struct efx_channel *);
+	void (*post_remove)(struct efx_channel *);
+	void (*get_name)(struct efx_channel *, char *buf, size_t len);
+	struct efx_channel *(*copy)(const struct efx_channel *);
+	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
+	bool (*want_txqs)(struct efx_channel *);
+	bool keep_eventq;
+	bool want_pio;
+};
+
+enum efx_led_mode {
+	EFX_LED_OFF	= 0,
+	EFX_LED_ON	= 1,
+	EFX_LED_DEFAULT	= 2
+};
+
+#define STRING_TABLE_LOOKUP(val, member) \
+	((val) < member ## _max) ? member ## _names[val] : "(invalid)"
+
+extern const char *const efx_loopback_mode_names[];
+extern const unsigned int efx_loopback_mode_max;
+#define LOOPBACK_MODE(efx) \
+	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)
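+/* For example, LOOPBACK_MODE(efx) expands to a bounds-checked lookup of
+ * efx->loopback_mode in efx_loopback_mode_names[], yielding "(invalid)"
+ * for out-of-range values.
+ */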
+
+enum efx_int_mode {
+	/* Be careful if altering to correct macro below */
+	EFX_INT_MODE_MSIX = 0,
+	EFX_INT_MODE_MSI = 1,
+	EFX_INT_MODE_LEGACY = 2,
+	EFX_INT_MODE_MAX	/* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum nic_state {
+	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
+	STATE_READY = 1,	/* hardware ready and netdev registered */
+	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
+	STATE_RECOVERY = 3,	/* device recovering from PCI error */
+};
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+#define EFX_FC_RX	FLOW_CTRL_RX
+#define EFX_FC_TX	FLOW_CTRL_TX
+#define EFX_FC_AUTO	4
+
+/**
+ * struct efx_link_state - Current state of the link
+ * @up: Link is up
+ * @fd: Link is full-duplex
+ * @fc: Actual flow control flags
+ * @speed: Link speed (Mbps)
+ */
+struct efx_link_state {
+	bool up;
+	bool fd;
+	u8 fc;
+	unsigned int speed;
+};
+
+static inline bool efx_link_state_equal(const struct efx_link_state *left,
+					const struct efx_link_state *right)
+{
+	return left->up == right->up && left->fd == right->fd &&
+		left->fc == right->fc && left->speed == right->speed;
+}
+
+/**
+ * enum efx_phy_mode - PHY operating mode flags
+ * @PHY_MODE_NORMAL: on and should pass traffic
+ * @PHY_MODE_TX_DISABLED: on with TX disabled
+ * @PHY_MODE_LOW_POWER: set to low power through MDIO
+ * @PHY_MODE_OFF: switched off through external control
+ * @PHY_MODE_SPECIAL: on but will not pass traffic
+ */
+enum efx_phy_mode {
+	PHY_MODE_NORMAL		= 0,
+	PHY_MODE_TX_DISABLED	= 1,
+	PHY_MODE_LOW_POWER	= 2,
+	PHY_MODE_OFF		= 4,
+	PHY_MODE_SPECIAL	= 8,
+};
+
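+/* The PHY counts as disabled if any mode flag other than
+ * PHY_MODE_TX_DISABLED is set, i.e. low power, off or special.
+ */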
+static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
+{
+	return !!(mode & ~PHY_MODE_TX_DISABLED);
+}
+
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ *	it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
+ */
+struct efx_hw_stat_desc {
+	const char *name;
+	u16 dma_width;
+	u16 offset;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
+
+struct vfdi_status;
+
+/* The reserved RSS context value */
+#define EFX_MCDI_RSS_CONTEXT_INVALID	0xffffffff
+/**
+ * struct efx_rss_context - A user-defined RSS context for filtering
+ * @list: node of linked list on which this struct is stored
+ * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
+ *	%EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
+ *	For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
+ * @user_id: the rss_context ID exposed to userspace over ethtool.
+ * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ * @rx_hash_key: Toeplitz hash key for this RSS context
+ * @indir_table: Indirection table for this RSS context
+ */
+struct efx_rss_context {
+	struct list_head list;
+	u32 context_id;
+	u32 user_id;
+	bool rx_hash_udp_4tuple;
+	u8 rx_hash_key[40];
+	u32 rx_indir_table[128];
+};
+
+#ifdef CONFIG_RFS_ACCEL
+/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING
+ * is used to test if filter does or will exist.
+ */
+#define EFX_ARFS_FILTER_ID_PENDING	-1
+#define EFX_ARFS_FILTER_ID_ERROR	-2
+#define EFX_ARFS_FILTER_ID_REMOVING	-3
+/**
+ * struct efx_arfs_rule - record of an ARFS filter and its IDs
+ * @node: linkage into hash table
+ * @spec: details of the filter (used as key for hash table).  Use efx->type to
+ *	determine which member to use.
+ * @rxq_index: channel to which the filter will steer traffic.
+ * @arfs_id: filter ID which was returned to ARFS
+ * @filter_id: index in software filter table.  May be
+ *	%EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet,
+ *	%EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or
+ *	%EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter.
+ */
+struct efx_arfs_rule {
+	struct hlist_node node;
+	struct efx_filter_spec spec;
+	u16 rxq_index;
+	u16 arfs_id;
+	s32 filter_id;
+};
+
+/* Size chosen so that the table is one page (4kB) */
+#define EFX_ARFS_HASH_TABLE_SIZE	512
+
+/**
+ * struct efx_async_filter_insertion - Request to asynchronously insert a filter
+ * @net_dev: Reference to the netdevice
+ * @spec: The filter to insert
+ * @work: Workitem for this request
+ * @rxq_index: Identifies the channel for which this request was made
+ * @flow_id: Identifies the kernel-side flow for which this request was made
+ */
+struct efx_async_filter_insertion {
+	struct net_device *net_dev;
+	struct efx_filter_spec spec;
+	struct work_struct work;
+	u16 rxq_index;
+	u32 flow_id;
+};
+
+/* Maximum number of ARFS workitems that may be in flight on an efx_nic */
+#define EFX_RPS_MAX_IN_FLIGHT	8
+#endif /* CONFIG_RFS_ACCEL */
+
+enum efx_xdp_tx_queues_mode {
+	EFX_XDP_TX_QUEUES_DEDICATED,	/* one queue per core, locking not needed */
+	EFX_XDP_TX_QUEUES_SHARED,	/* each queue used by more than 1 core */
+	EFX_XDP_TX_QUEUES_BORROWED	/* queues borrowed from net stack */
+};
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @node: List node for maintaining primary/secondary function lists
+ * @primary: &struct efx_nic instance for the primary function of this
+ *	controller.  May be the same structure, and may be %NULL if no
+ *	primary function is bound.  Serialised by rtnl_lock.
+ * @secondary_list: List of &struct efx_nic instances for the secondary PCI
+ *	functions of the controller, if this is for the primary function.
+ *	Serialised by rtnl_lock.
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @workqueue: Workqueue for port reconfigures and the HW monitor.
+ *	Work items do not hold and must not acquire RTNL.
+ * @workqueue_name: Name of workqueue
+ * @reset_work: Scheduled reset workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @vi_stride: step between per-VI registers / memory regions
+ * @interrupt_mode: Interrupt mode
+ * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
+ * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
+ * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
+ * @irqs_hooked: Channel interrupts are hooked
+ * @irq_rx_mod_step_us: Step size for IRQ moderation for RX event queues
+ * @irq_rx_moderation_us: IRQ moderation time for RX event queues
+ * @msg_enable: Log message enable flags
+ * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
+ * @reset_pending: Bitmask for pending resets
+ * @tx_queue: TX DMA queues
+ * @rx_queue: RX DMA queues
+ * @channel: Channels
+ * @msi_context: Context for each MSI
+ * @extra_channel_types: Types of extra (non-traffic) channels that
+ *	should be allocated for this NIC
+ * @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
+ * @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
+ * @xdp_txq_queues_mode: XDP TX queues sharing strategy.
+ * @rxq_entries: Size of receive queues requested by user.
+ * @txq_entries: Size of transmit queues requested by user.
+ * @txq_stop_thresh: TX queue fill level at or above which we stop it.
+ * @txq_wake_thresh: TX queue fill level at or below which we wake it.
+ * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
+ * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
+ * @sram_lim_qw: Qword address limit of SRAM
+ * @next_buffer_table: First available buffer table id
+ * @n_channels: Number of channels in use
+ * @n_rx_channels: Number of channels used for RX (= number of RX queues)
+ * @n_tx_channels: Number of channels used for TX
+ * @n_extra_tx_channels: Number of extra channels with TX queues
+ * @tx_queues_per_channel: number of TX queues probed on each channel
+ * @n_xdp_channels: Number of channels used for XDP TX
+ * @xdp_channel_offset: Offset of zeroth channel used for XDP TX.
+ * @xdp_tx_per_channel: Max number of TX queues on an XDP TX channel.
+ * @rx_ip_align: RX DMA address offset to have IP header aligned in
+ *	accordance with NET_IP_ALIGN
+ * @rx_dma_len: Current maximum RX DMA length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ *	for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ *	(valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ *	(valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
+ * @rx_packet_ts_offset: Offset of timestamp from start of packet data
+ *	(valid only if channel->sync_timestamps_enabled; always negative)
+ * @rx_scatter: Scatter mode enabled for receives
+ * @rss_context: Main RSS context.  Its @list member is the head of the list of
+ *	RSS contexts created by user requests
+ * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @vport_id: The function's vport ID, only relevant for PFs
+ * @int_error_count: Number of internal errors seen recently
+ * @int_error_expire: Time at which error count will be expired
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ *	acknowledge but do nothing else.
+ * @irq_status: Interrupt status buffer
+ * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
+ * @irq_level: IRQ level/index for IRQs not triggered by an event queue
+ * @selftest_work: Work item for asynchronous self-test
+ * @mtd_list: List of MTDs attached to the NIC
+ * @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
+ * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
+ *	efx_monitor() and efx_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
+ *	efx_mac_work() with kernel interfaces. Safe to read under any
+ *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ *	be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
+ * @num_mac_stats: Number of MAC stats reported by firmware (MAC_STATS_NUM_STATS
+ *	field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
+ * @stats_buffer: DMA buffer for statistics
+ * @phy_type: PHY type
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mdio: PHY MDIO interface
+ * @mdio_bus: PHY MDIO bus ID (only used by Siena)
+ * @phy_mode: PHY operating mode. Serialised by @mac_lock.
+ * @link_advertising: Autonegotiation advertising flags
+ * @fec_config: Forward Error Correction configuration flags.  For bit positions
+ *	see &enum ethtool_fec_config_bits.
+ * @link_state: Current state of the link
+ * @n_link_state_changes: Number of times the link has changed state
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ *	Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ *	Protected by @mac_lock.
+ * @wanted_fc: Wanted flow control flags
+ * @fc_disable: When non-zero flow control is disabled. Typically used to
+ *	ensure that network back pressure doesn't delay dma queue flushes.
+ *	Serialised by the rtnl lock.
+ * @mac_work: Work item for changing MAC promiscuity and multicast hash
+ * @loopback_mode: Loopback status
+ * @loopback_modes: Supported loopback mode bitmask
+ * @loopback_selftest: Offline self-test private state
+ * @xdp_prog: Current XDP programme for this interface
+ * @filter_sem: Filter table rw_semaphore, protects existence of @filter_state
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_mutex: Protects RPS state of all channels
+ * @rps_slot_map: bitmap of in-flight entries in @rps_slot
+ * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work()
+ * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and
+ *	@rps_next_id).
+ * @rps_hash_table: Mapping between ARFS filters and their various IDs
+ * @rps_next_id: next arfs_id for an ARFS filter
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
+ * @rxq_flush_pending: Number of receive queues that need to be flushed.
+ *	Decremented when efx_flush_rx_queue() is called.
+ * @rxq_flush_outstanding: Number of RX flushes started but not yet
+ *	completed (either success or failure). Not used when MCDI is used to
+ *	flush receive queues.
+ * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
+ * @vf_count: Number of VFs intended to be enabled.
+ * @vf_init_count: Number of VFs that have been fully initialised.
+ * @vi_scale: log2 number of vnics per VF.
+ * @ptp_data: PTP state data
+ * @ptp_warned: has this NIC seen and warned about unexpected PTP events?
+ * @vpd_sn: Serial number read from VPD
+ * @xdp_rxq_info_failed: Have any of the rx queues failed to initialise their
+ *      xdp_rxq_info structures?
+ * @netdev_notifier: Netdevice notifier.
+ * @mem_bar: The BAR that is mapped into membase.
+ * @reg_base: Offset from the start of the bar to the function control window.
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle a possible test interrupt.  This
+ *	field is used by efx_test_interrupts() to verify that an
+ *	interrupt has occurred.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ *	efx_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
+ *
+ * This is stored in the private area of the &struct net_device.
+ */
+struct efx_nic {
+	/* The following fields should be written very rarely */
+
+	char name[IFNAMSIZ];
+	struct list_head node;
+	struct efx_nic *primary;
+	struct list_head secondary_list;
+	struct pci_dev *pci_dev;
+	unsigned int port_num;
+	const struct efx_nic_type *type;
+	int legacy_irq;
+	bool eeh_disabled_legacy_irq;
+	struct workqueue_struct *workqueue;
+	char workqueue_name[16];
+	struct work_struct reset_work;
+	resource_size_t membase_phys;
+	void __iomem *membase;
+
+	unsigned int vi_stride;
+
+	enum efx_int_mode interrupt_mode;
+	unsigned int timer_quantum_ns;
+	unsigned int timer_max_ns;
+	bool irq_rx_adaptive;
+	bool irqs_hooked;
+	unsigned int irq_mod_step_us;
+	unsigned int irq_rx_moderation_us;
+	u32 msg_enable;
+
+	enum nic_state state;
+	unsigned long reset_pending;
+
+	struct efx_channel *channel[EFX_MAX_CHANNELS];
+	struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
+	const struct efx_channel_type *
+	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+
+	unsigned int xdp_tx_queue_count;
+	struct efx_tx_queue **xdp_tx_queues;
+	enum efx_xdp_tx_queues_mode xdp_txq_queues_mode;
+
+	unsigned rxq_entries;
+	unsigned txq_entries;
+	unsigned int txq_stop_thresh;
+	unsigned int txq_wake_thresh;
+
+	unsigned tx_dc_base;
+	unsigned rx_dc_base;
+	unsigned sram_lim_qw;
+	unsigned next_buffer_table;
+
+	unsigned int max_channels;
+	unsigned int max_vis;
+	unsigned int max_tx_channels;
+	unsigned n_channels;
+	unsigned n_rx_channels;
+	unsigned rss_spread;
+	unsigned tx_channel_offset;
+	unsigned n_tx_channels;
+	unsigned n_extra_tx_channels;
+	unsigned int tx_queues_per_channel;
+	unsigned int n_xdp_channels;
+	unsigned int xdp_channel_offset;
+	unsigned int xdp_tx_per_channel;
+	unsigned int rx_ip_align;
+	unsigned int rx_dma_len;
+	unsigned int rx_buffer_order;
+	unsigned int rx_buffer_truesize;
+	unsigned int rx_page_buf_step;
+	unsigned int rx_bufs_per_page;
+	unsigned int rx_pages_per_batch;
+	unsigned int rx_prefix_size;
+	int rx_packet_hash_offset;
+	int rx_packet_len_offset;
+	int rx_packet_ts_offset;
+	bool rx_scatter;
+	struct efx_rss_context rss_context;
+	struct mutex rss_lock;
+	u32 vport_id;
+
+	unsigned int_error_count;
+	unsigned long int_error_expire;
+
+	bool must_realloc_vis;
+	bool irq_soft_enabled;
+	struct efx_buffer irq_status;
+	unsigned irq_zero_count;
+	unsigned irq_level;
+	struct delayed_work selftest_work;
+
+#ifdef CONFIG_SFC_MTD
+	struct list_head mtd_list;
+#endif
+
+	void *nic_data;
+	struct efx_mcdi_data *mcdi;
+
+	struct mutex mac_lock;
+	struct work_struct mac_work;
+	bool port_enabled;
+
+	bool mc_bist_for_other_fn;
+	bool port_initialized;
+	struct net_device *net_dev;
+
+	netdev_features_t fixed_features;
+
+	u16 num_mac_stats;
+	struct efx_buffer stats_buffer;
+	u64 rx_nodesc_drops_total;
+	u64 rx_nodesc_drops_while_down;
+	bool rx_nodesc_drops_prev_state;
+
+	unsigned int phy_type;
+	void *phy_data;
+	struct mdio_if_info mdio;
+	unsigned int mdio_bus;
+	enum efx_phy_mode phy_mode;
+
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(link_advertising);
+	u32 fec_config;
+	struct efx_link_state link_state;
+	unsigned int n_link_state_changes;
+
+	bool unicast_filter;
+	union efx_multicast_hash multicast_hash;
+	u8 wanted_fc;
+	unsigned fc_disable;
+
+	atomic_t rx_reset;
+	enum efx_loopback_mode loopback_mode;
+	u64 loopback_modes;
+
+	void *loopback_selftest;
+	/* We access loopback_selftest immediately before running XDP,
+	 * so we want them next to each other.
+	 */
+	struct bpf_prog __rcu *xdp_prog;
+
+	struct rw_semaphore filter_sem;
+	void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+	struct mutex rps_mutex;
+	unsigned long rps_slot_map;
+	struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT];
+	spinlock_t rps_hash_lock;
+	struct hlist_head *rps_hash_table;
+	u32 rps_next_id;
+#endif
+
+	atomic_t active_queues;
+	atomic_t rxq_flush_pending;
+	atomic_t rxq_flush_outstanding;
+	wait_queue_head_t flush_wq;
+
+#ifdef CONFIG_SFC_SRIOV
+	unsigned vf_count;
+	unsigned vf_init_count;
+	unsigned vi_scale;
+#endif
+
+	struct efx_ptp_data *ptp_data;
+	bool ptp_warned;
+
+	char *vpd_sn;
+	bool xdp_rxq_info_failed;
+
+	struct notifier_block netdev_notifier;
+
+	unsigned int mem_bar;
+	u32 reg_base;
+
+	/* The following fields may be written more often */
+
+	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+	spinlock_t biu_lock;
+	int last_irq_cpu;
+	spinlock_t stats_lock;
+	atomic_t n_rx_noskb_drops;
+};
+
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+	return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+
+static inline unsigned int efx_port_num(struct efx_nic *efx)
+{
+	return efx->port_num;
+}
+
+struct efx_mtd_partition {
+	struct list_head node;
+	struct mtd_info mtd;
+	const char *dev_type_name;
+	const char *type_name;
+	char name[IFNAMSIZ + 20];
+};
+
+struct efx_udp_tunnel {
+#define TUNNEL_ENCAP_UDP_PORT_ENTRY_INVALID	0xffff
+	u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
+	__be16 port;
+};
+
+/**
+ * struct efx_nic_type - Efx device type definition
+ * @mem_bar: Get the memory BAR
+ * @mem_map_size: Get memory BAR mapped size
+ * @probe: Probe the controller
+ * @remove: Free resources allocated by probe()
+ * @init: Initialise the controller
+ * @dimension_resources: Dimension controller resources (buffer table,
+ *	and VIs once the available interrupt resources are clear)
+ * @fini: Shut down the controller
+ * @monitor: Periodic function for polling link state and hardware monitor
+ * @map_reset_reason: Map ethtool reset reason to a reset method
+ * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
+ * @reset: Reset the controller hardware and possibly the PHY.  This will
+ *	be called while the controller is uninitialised.
+ * @probe_port: Probe the MAC and PHY
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
+ *	(for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ *	architecture)
+ * @prepare_flr: Prepare for an FLR
+ * @finish_flr: Clean up after an FLR
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ *	Either argument may be %NULL.
+ * @update_stats_atomic: Update statistics while in atomic context, if that
+ *	is more limiting than @update_stats.  Otherwise, leave %NULL and
+ *	driver core will call @update_stats.
+ * @start_stats: Start the regular fetching of statistics
+ * @pull_stats: Pull stats from the NIC and wait until they arrive.
+ * @stop_stats: Stop the regular fetching of statistics
+ * @push_irq_moderation: Apply interrupt moderation value
+ * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
+ * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
+ *	to the hardware.  Serialised by the mac_lock.
+ * @check_mac_fault: Check MAC fault state. True if fault present.
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+ * @get_fec_stats: Get standard FEC statistics.
+ * @test_chip: Test registers.  May use efx_farch_test_registers(), and is
+ *	expected to reset the NIC.
+ * @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ *	The SDU length may be any value from 0 up to the protocol-
+ *	defined maximum, but its buffer will be padded to a multiple
+ *	of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU.  The offset will
+ *	be a multiple of 4.  The length may not be, but the buffer
+ *	will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted.  If so,
+ *	return an appropriate error code for aborting any current
+ *	request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC.  Each event queue must
+ *	be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC.  Each event
+ *	queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel.  The @dev_id argument is
+ *	a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt.  The @dev_id argument
+ *	is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type)
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @tx_enqueue: Add an SKB to TX queue
+ * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
+ * @rx_push_rss_context_config: Write RSS hash key and indirection table for
+ *	user RSS context to the NIC
+ * @rx_pull_rss_context_config: Read RSS hash key and indirection table for user
+ *	RSS context back from the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: Add or replace a filter
+ * @filter_remove_safe: Remove a filter by ID, carefully
+ * @filter_get_safe: Retrieve a filter by ID, carefully
+ * @filter_clear_rx: Remove all RX filters whose priority is less than or
+ *	equal to the given priority and is not %EFX_FILTER_PRI_AUTO
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ *	This must check whether the specified table entry is used by RFS
+ *	and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ *	 using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition.  This
+ *	also notifies the driver that a writer has finished using this
+ *	partition.
+ * @ptp_write_host_time: Send host time to MC as part of sync protocol
+ * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
+ *	timestamping, possibly only temporarily for the purposes of a reset.
+ * @ptp_set_ts_config: Set hardware timestamp configuration.  The flags
+ *	and tx_type will already have been validated but this operation
+ *	must validate and update rx_filter.
+ * @get_phys_port_id: Get the underlying physical port id.
+ * @set_mac_address: Set the MAC address of the device
+ * @tso_versions: Returns mask of firmware-assisted TSO versions supported.
+ *	If %NULL, then the device does not support any TSO version.
+ * @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
+ * @udp_tnl_has_port: Check if a port has been added as UDP tunnel
+ * @print_additional_fwver: Dump NIC-specific additional FW version info
+ * @sensor_event: Handle a sensor event from MCDI
+ * @rx_recycle_ring_size: Size of the RX recycle ring
+ * @revision: Hardware architecture revision
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @max_dma_mask: Maximum possible DMA mask
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
+ * @rx_ts_offset: Offset of timestamp within prefix
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
+ * @min_interrupt_mode: Lowest capability interrupt mode supported
+ *	from &enum efx_int_mode.
+ * @timer_period_max: Maximum period of interrupt timer (in ticks)
+ * @offload_features: net_device feature flags for protocol offload
+ *	features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
+ * @hwtstamp_filters: Mask of hardware timestamp filter types supported
+ */
+struct efx_nic_type {
+	bool is_vf;
+	unsigned int (*mem_bar)(struct efx_nic *efx);
+	unsigned int (*mem_map_size)(struct efx_nic *efx);
+	int (*probe)(struct efx_nic *efx);
+	void (*remove)(struct efx_nic *efx);
+	int (*init)(struct efx_nic *efx);
+	int (*dimension_resources)(struct efx_nic *efx);
+	void (*fini)(struct efx_nic *efx);
+	void (*monitor)(struct efx_nic *efx);
+	enum reset_type (*map_reset_reason)(enum reset_type reason);
+	int (*map_reset_flags)(u32 *flags);
+	int (*reset)(struct efx_nic *efx, enum reset_type method);
+	int (*probe_port)(struct efx_nic *efx);
+	void (*remove_port)(struct efx_nic *efx);
+	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+	int (*fini_dmaq)(struct efx_nic *efx);
+	void (*prepare_flush)(struct efx_nic *efx);
+	void (*finish_flush)(struct efx_nic *efx);
+	void (*prepare_flr)(struct efx_nic *efx);
+	void (*finish_flr)(struct efx_nic *efx);
+	size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+			       struct rtnl_link_stats64 *core_stats);
+	size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
+				      struct rtnl_link_stats64 *core_stats);
+	void (*start_stats)(struct efx_nic *efx);
+	void (*pull_stats)(struct efx_nic *efx);
+	void (*stop_stats)(struct efx_nic *efx);
+	void (*push_irq_moderation)(struct efx_channel *channel);
+	int (*reconfigure_port)(struct efx_nic *efx);
+	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
+	int (*reconfigure_mac)(struct efx_nic *efx, bool mtu_only);
+	bool (*check_mac_fault)(struct efx_nic *efx);
+	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+	int (*set_wol)(struct efx_nic *efx, u32 type);
+	void (*resume_wol)(struct efx_nic *efx);
+	void (*get_fec_stats)(struct efx_nic *efx,
+			      struct ethtool_fec_stats *fec_stats);
+	unsigned int (*check_caps)(const struct efx_nic *efx,
+				   u8 flag,
+				   u32 offset);
+	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
+	int (*test_nvram)(struct efx_nic *efx);
+	void (*mcdi_request)(struct efx_nic *efx,
+			     const efx_dword_t *hdr, size_t hdr_len,
+			     const efx_dword_t *sdu, size_t sdu_len);
+	bool (*mcdi_poll_response)(struct efx_nic *efx);
+	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+				   size_t pdu_offset, size_t pdu_len);
+	int (*mcdi_poll_reboot)(struct efx_nic *efx);
+	void (*mcdi_reboot_detected)(struct efx_nic *efx);
+	void (*irq_enable_master)(struct efx_nic *efx);
+	int (*irq_test_generate)(struct efx_nic *efx);
+	void (*irq_disable_non_ev)(struct efx_nic *efx);
+	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+	int (*tx_probe)(struct efx_tx_queue *tx_queue);
+	void (*tx_init)(struct efx_tx_queue *tx_queue);
+	void (*tx_remove)(struct efx_tx_queue *tx_queue);
+	void (*tx_write)(struct efx_tx_queue *tx_queue);
+	netdev_tx_t (*tx_enqueue)(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+	unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
+				     dma_addr_t dma_addr, unsigned int len);
+	int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
+				  const u32 *rx_indir_table, const u8 *key);
+	int (*rx_pull_rss_config)(struct efx_nic *efx);
+	int (*rx_push_rss_context_config)(struct efx_nic *efx,
+					  struct efx_rss_context *ctx,
+					  const u32 *rx_indir_table,
+					  const u8 *key);
+	int (*rx_pull_rss_context_config)(struct efx_nic *efx,
+					  struct efx_rss_context *ctx);
+	void (*rx_restore_rss_contexts)(struct efx_nic *efx);
+	int (*rx_probe)(struct efx_rx_queue *rx_queue);
+	void (*rx_init)(struct efx_rx_queue *rx_queue);
+	void (*rx_remove)(struct efx_rx_queue *rx_queue);
+	void (*rx_write)(struct efx_rx_queue *rx_queue);
+	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+	void (*rx_packet)(struct efx_channel *channel);
+	bool (*rx_buf_hash_valid)(const u8 *prefix);
+	int (*ev_probe)(struct efx_channel *channel);
+	int (*ev_init)(struct efx_channel *channel);
+	void (*ev_fini)(struct efx_channel *channel);
+	void (*ev_remove)(struct efx_channel *channel);
+	int (*ev_process)(struct efx_channel *channel, int quota);
+	void (*ev_read_ack)(struct efx_channel *channel);
+	void (*ev_test_generate)(struct efx_channel *channel);
+	int (*filter_table_probe)(struct efx_nic *efx);
+	void (*filter_table_restore)(struct efx_nic *efx);
+	void (*filter_table_remove)(struct efx_nic *efx);
+	void (*filter_update_rx_scatter)(struct efx_nic *efx);
+	s32 (*filter_insert)(struct efx_nic *efx,
+			     struct efx_filter_spec *spec, bool replace);
+	int (*filter_remove_safe)(struct efx_nic *efx,
+				  enum efx_filter_priority priority,
+				  u32 filter_id);
+	int (*filter_get_safe)(struct efx_nic *efx,
+			       enum efx_filter_priority priority,
+			       u32 filter_id, struct efx_filter_spec *);
+	int (*filter_clear_rx)(struct efx_nic *efx,
+			       enum efx_filter_priority priority);
+	u32 (*filter_count_rx_used)(struct efx_nic *efx,
+				    enum efx_filter_priority priority);
+	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+				 enum efx_filter_priority priority,
+				 u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+				      unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+	int (*mtd_probe)(struct efx_nic *efx);
+	void (*mtd_rename)(struct efx_mtd_partition *part);
+	int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+			size_t *retlen, u8 *buffer);
+	int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+	int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+			 size_t *retlen, const u8 *buffer);
+	int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+	void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
+	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
+	int (*ptp_set_ts_config)(struct efx_nic *efx,
+				 struct hwtstamp_config *init);
+	int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
+	int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+	int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+	int (*get_phys_port_id)(struct efx_nic *efx,
+				struct netdev_phys_item_id *ppid);
+	int (*sriov_init)(struct efx_nic *efx);
+	void (*sriov_fini)(struct efx_nic *efx);
+	bool (*sriov_wanted)(struct efx_nic *efx);
+	void (*sriov_reset)(struct efx_nic *efx);
+	void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
+	int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
+	int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
+				 u8 qos);
+	int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
+				     bool spoofchk);
+	int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
+				   struct ifla_vf_info *ivi);
+	int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
+				       int link_state);
+	int (*vswitching_probe)(struct efx_nic *efx);
+	int (*vswitching_restore)(struct efx_nic *efx);
+	void (*vswitching_remove)(struct efx_nic *efx);
+	int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
+	int (*set_mac_address)(struct efx_nic *efx);
+	u32 (*tso_versions)(struct efx_nic *efx);
+	int (*udp_tnl_push_ports)(struct efx_nic *efx);
+	bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
+	size_t (*print_additional_fwver)(struct efx_nic *efx, char *buf,
+					 size_t len);
+	void (*sensor_event)(struct efx_nic *efx, efx_qword_t *ev);
+	unsigned int (*rx_recycle_ring_size)(const struct efx_nic *efx);
+
+	int revision;
+	unsigned int txd_ptr_tbl_base;
+	unsigned int rxd_ptr_tbl_base;
+	unsigned int buf_tbl_base;
+	unsigned int evq_ptr_tbl_base;
+	unsigned int evq_rptr_tbl_base;
+	u64 max_dma_mask;
+	unsigned int rx_prefix_size;
+	unsigned int rx_hash_offset;
+	unsigned int rx_ts_offset;
+	unsigned int rx_buffer_padding;
+	bool can_rx_scatter;
+	bool always_rx_scatter;
+	bool option_descriptors;
+	unsigned int min_interrupt_mode;
+	unsigned int timer_period_max;
+	netdev_features_t offload_features;
+	int mcdi_max_ver;
+	unsigned int max_rx_ip_filters;
+	u32 hwtstamp_filters;
+	unsigned int rx_hash_key_size;
+};
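+
+/* Each controller generation supplies its own instance of this ops table;
+ * the driver core dispatches hardware-specific work through it, e.g.
+ * efx->type->ev_process(channel, quota) when polling events.
+ */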
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+static inline struct efx_channel *
+efx_get_channel(struct efx_nic *efx, unsigned index)
+{
+	EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_channels);
+	return efx->channel[index];
+}
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx)				\
+	for (_channel = (_efx)->channel[0];				\
+	     _channel;							\
+	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
+		     (_efx)->channel[_channel->channel + 1] : NULL)
+
+/* Iterate over all used channels in reverse */
+#define efx_for_each_channel_rev(_channel, _efx)			\
+	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
+	     _channel;							\
+	     _channel = _channel->channel ?				\
+		     (_efx)->channel[_channel->channel - 1] : NULL)
+
+static inline struct efx_channel *
+efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
+{
+	EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels);
+	return efx->channel[efx->tx_channel_offset + index];
+}
+
+static inline struct efx_channel *
+efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
+{
+	EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_xdp_channels);
+	return efx->channel[efx->xdp_channel_offset + index];
+}
+
+static inline bool efx_channel_is_xdp_tx(struct efx_channel *channel)
+{
+	return channel->channel - channel->efx->xdp_channel_offset <
+	       channel->efx->n_xdp_channels;
+}
+
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+	return true;
+}
+
+static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel)
+{
+	if (efx_channel_is_xdp_tx(channel))
+		return channel->efx->xdp_tx_per_channel;
+	return channel->efx->tx_queues_per_channel;
+}
+
+static inline struct efx_tx_queue *
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type)
+{
+	EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES);
+	return channel->tx_queue_by_type[type];
+}
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type)
+{
+	struct efx_channel *channel = efx_get_tx_channel(efx, index);
+
+	return efx_channel_get_tx_queue(channel, type);
+}
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
+	if (!efx_channel_has_tx_queues(_channel))			\
+		;							\
+	else								\
+		for (_tx_queue = (_channel)->tx_queue;			\
+		     _tx_queue < (_channel)->tx_queue +			\
+				 efx_channel_num_tx_queues(_channel);		\
+		     _tx_queue++)
+
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+	return channel->rx_queue.core_index >= 0;
+}
+
+static inline struct efx_rx_queue *
+efx_channel_get_rx_queue(struct efx_channel *channel)
+{
+	EFX_WARN_ON_ONCE_PARANOID(!efx_channel_has_rx_queue(channel));
+	return &channel->rx_queue;
+}
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
+	if (!efx_channel_has_rx_queue(_channel))			\
+		;							\
+	else								\
+		for (_rx_queue = &(_channel)->rx_queue;			\
+		     _rx_queue;						\
+		     _rx_queue = NULL)
+
+static inline struct efx_channel *
+efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
+{
+	return container_of(rx_queue, struct efx_channel, rx_queue);
+}
+
+static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
+{
+	return efx_rx_queue_channel(rx_queue)->channel;
+}
+
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+						  unsigned int index)
+{
+	return &rx_queue->buffer[index];
+}
+
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+		return efx_rx_buffer(rx_queue, 0);
+	else
+		return rx_buf + 1;
+}
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU.  The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding.  This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ *
+ * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
+ * XGMII cycle).  If the frame length reaches the maximum value in the
+ * same cycle, the XMAC can miss the IPG altogether.  We work around
+ * this by adding a further 16 bytes.
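+ *
+ * For example, an MTU of 1500 gives
+ * ALIGN(1500 + 14 + 4 + 4 + 16, 8) = ALIGN(1538, 8) = 1544 bytes.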
+ */
+#define EFX_FRAME_PAD	16
+#define EFX_MAX_FRAME_LEN(mtu) \
+	(ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
+
+static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
+}
+static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
+{
+	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+/* Get the max fill level of the TX queues on this channel */
+static inline unsigned int
+efx_channel_tx_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->read_count);
+
+	return fill_level;
+}
+
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+	struct efx_tx_queue *tx_queue;
+	unsigned int fill_level = 0;
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		fill_level = max(fill_level,
+				 tx_queue->insert_count - tx_queue->old_read_count);
+
+	return fill_level;
+}
+
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but is
+ * always present in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+	const struct net_device *net_dev = efx->net_dev;
+
+	return net_dev->features | net_dev->hw_features;
+}
+
+/* Get the current TX queue insert index. */
+static inline unsigned int
+efx_tx_queue_get_insert_index(const struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->insert_count & tx_queue->ptr_mask;
+}
+
+/* Get a TX buffer. */
+static inline struct efx_tx_buffer *
+__efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	return &tx_queue->buffer[efx_tx_queue_get_insert_index(tx_queue)];
+}
+
+/* Get a TX buffer, checking it's not currently in use. */
+static inline struct efx_tx_buffer *
+efx_tx_queue_get_insert_buffer(const struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer =
+		__efx_tx_queue_get_insert_buffer(tx_queue);
+
+	EFX_WARN_ON_ONCE_PARANOID(buffer->len);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->flags);
+	EFX_WARN_ON_ONCE_PARANOID(buffer->unmap_len);
+
+	return buffer;
+}
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/siena/nic.c b/drivers/net/ethernet/sfc/siena/nic.c
new file mode 100644
index 000000000000..22fbb0ae77fb
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/cpu_rmap.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "ef10_regs.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+#include "mcdi_pcol.h"
+
+/**************************************************************************
+ *
+ * Generic buffer handling
+ * These buffers are used for interrupt status, MAC stats, etc.
+ *
+ **************************************************************************/
+
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len, gfp_t gfp_flags)
+{
+	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
+					  &buffer->dma_addr, gfp_flags);
+	if (!buffer->addr)
+		return -ENOMEM;
+	buffer->len = len;
+	return 0;
+}
+
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+	if (buffer->addr) {
+		dma_free_coherent(&efx->pci_dev->dev, buffer->len,
+				  buffer->addr, buffer->dma_addr);
+		buffer->addr = NULL;
+	}
+}
+
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
+
+void efx_nic_event_test_start(struct efx_channel *channel)
+{
+	channel->event_test_cpu = -1;
+	smp_wmb();
+	channel->efx->type->ev_test_generate(channel);
+}
+
+int efx_nic_irq_test_start(struct efx_nic *efx)
+{
+	efx->last_irq_cpu = -1;
+	smp_wmb();
+	return efx->type->irq_test_generate(efx);
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int efx_nic_init_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+	unsigned int n_irqs;
+	int rc;
+
+	if (!EFX_INT_MODE_USE_MSI(efx)) {
+		rc = request_irq(efx->legacy_irq,
+				 efx->type->irq_handle_legacy, IRQF_SHARED,
+				 efx->name, efx);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook legacy IRQ %d\n",
+				  efx->pci_dev->irq);
+			goto fail1;
+		}
+		efx->irqs_hooked = true;
+		return 0;
+	}
+
+#ifdef CONFIG_RFS_ACCEL
+	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+		efx->net_dev->rx_cpu_rmap =
+			alloc_irq_cpu_rmap(efx->n_rx_channels);
+		if (!efx->net_dev->rx_cpu_rmap) {
+			rc = -ENOMEM;
+			goto fail1;
+		}
+	}
+#endif
+
+	/* Hook MSI or MSI-X interrupt */
+	n_irqs = 0;
+	efx_for_each_channel(channel, efx) {
+		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
+				 IRQF_PROBE_SHARED, /* Not shared */
+				 efx->msi_context[channel->channel].name,
+				 &efx->msi_context[channel->channel]);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "failed to hook IRQ %d\n", channel->irq);
+			goto fail2;
+		}
+		++n_irqs;
+
+#ifdef CONFIG_RFS_ACCEL
+		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
+		    channel->channel < efx->n_rx_channels) {
+			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
+					      channel->irq);
+			if (rc)
+				goto fail2;
+		}
+#endif
+	}
+
+	efx->irqs_hooked = true;
+	return 0;
+
+ fail2:
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+	efx_for_each_channel(channel, efx) {
+		if (n_irqs-- == 0)
+			break;
+		free_irq(channel->irq, &efx->msi_context[channel->channel]);
+	}
+ fail1:
+	return rc;
+}
+
+void efx_nic_fini_interrupt(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+#ifdef CONFIG_RFS_ACCEL
+	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
+	efx->net_dev->rx_cpu_rmap = NULL;
+#endif
+
+	if (!efx->irqs_hooked)
+		return;
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
+		free_irq(efx->legacy_irq, efx);
+	}
+	efx->irqs_hooked = false;
+}
+
+/* Register dump */
+
+#define REGISTER_REVISION_FA	1
+#define REGISTER_REVISION_FB	2
+#define REGISTER_REVISION_FC	3
+#define REGISTER_REVISION_FZ	3	/* last Falcon arch revision */
+#define REGISTER_REVISION_ED	4
+#define REGISTER_REVISION_EZ	4	/* latest EF10 revision */
+
+struct efx_nic_reg {
+	u32 offset:24;
+	u32 min_revision:3, max_revision:3;
+};
+
+#define REGISTER(name, arch, min_rev, max_rev) {			\
+	arch ## R_ ## min_rev ## max_rev ## _ ## name,			\
+	REGISTER_REVISION_ ## arch ## min_rev,				\
+	REGISTER_REVISION_ ## arch ## max_rev				\
+}
+#define REGISTER_AA(name) REGISTER(name, F, A, A)
+#define REGISTER_AB(name) REGISTER(name, F, A, B)
+#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
+#define REGISTER_BB(name) REGISTER(name, F, B, B)
+#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
+#define REGISTER_CZ(name) REGISTER(name, F, C, Z)
+#define REGISTER_DZ(name) REGISTER(name, E, D, Z)
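+/* For example, REGISTER_AB(GM_CFG1) expands to
+ * { FR_AB_GM_CFG1, REGISTER_REVISION_FA, REGISTER_REVISION_FB },
+ * i.e. a register dumped only on Falcon revisions A and B.
+ */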
+
+static const struct efx_nic_reg efx_nic_regs[] = {
+	REGISTER_AZ(ADR_REGION),
+	REGISTER_AZ(INT_EN_KER),
+	REGISTER_BZ(INT_EN_CHAR),
+	REGISTER_AZ(INT_ADR_KER),
+	REGISTER_BZ(INT_ADR_CHAR),
+	/* INT_ACK_KER is WO */
+	/* INT_ISR0 is RC */
+	REGISTER_AZ(HW_INIT),
+	REGISTER_CZ(USR_EV_CFG),
+	REGISTER_AB(EE_SPI_HCMD),
+	REGISTER_AB(EE_SPI_HADR),
+	REGISTER_AB(EE_SPI_HDATA),
+	REGISTER_AB(EE_BASE_PAGE),
+	REGISTER_AB(EE_VPD_CFG0),
+	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
+	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
+	/* PCIE_CORE_INDIRECT is indirect */
+	REGISTER_AB(NIC_STAT),
+	REGISTER_AB(GPIO_CTL),
+	REGISTER_AB(GLB_CTL),
+	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
+	REGISTER_BZ(DP_CTRL),
+	REGISTER_AZ(MEM_STAT),
+	REGISTER_AZ(CS_DEBUG),
+	REGISTER_AZ(ALTERA_BUILD),
+	REGISTER_AZ(CSR_SPARE),
+	REGISTER_AB(PCIE_SD_CTL0123),
+	REGISTER_AB(PCIE_SD_CTL45),
+	REGISTER_AB(PCIE_PCS_CTL_STAT),
+	/* DEBUG_DATA_OUT is not used */
+	/* DRV_EV is WO */
+	REGISTER_AZ(EVQ_CTL),
+	REGISTER_AZ(EVQ_CNT1),
+	REGISTER_AZ(EVQ_CNT2),
+	REGISTER_AZ(BUF_TBL_CFG),
+	REGISTER_AZ(SRM_RX_DC_CFG),
+	REGISTER_AZ(SRM_TX_DC_CFG),
+	REGISTER_AZ(SRM_CFG),
+	/* BUF_TBL_UPD is WO */
+	REGISTER_AZ(SRM_UPD_EVQ),
+	REGISTER_AZ(SRAM_PARITY),
+	REGISTER_AZ(RX_CFG),
+	REGISTER_BZ(RX_FILTER_CTL),
+	/* RX_FLUSH_DESCQ is WO */
+	REGISTER_AZ(RX_DC_CFG),
+	REGISTER_AZ(RX_DC_PF_WM),
+	REGISTER_BZ(RX_RSS_TKEY),
+	/* RX_NODESC_DROP is RC */
+	REGISTER_AA(RX_SELF_RST),
+	/* RX_DEBUG, RX_PUSH_DROP are not used */
+	REGISTER_CZ(RX_RSS_IPV6_REG1),
+	REGISTER_CZ(RX_RSS_IPV6_REG2),
+	REGISTER_CZ(RX_RSS_IPV6_REG3),
+	/* TX_FLUSH_DESCQ is WO */
+	REGISTER_AZ(TX_DC_CFG),
+	REGISTER_AA(TX_CHKSM_CFG),
+	REGISTER_AZ(TX_CFG),
+	/* TX_PUSH_DROP is not used */
+	REGISTER_AZ(TX_RESERVED),
+	REGISTER_BZ(TX_PACE),
+	/* TX_PACE_DROP_QID is RC */
+	REGISTER_BB(TX_VLAN),
+	REGISTER_BZ(TX_IPFIL_PORTEN),
+	REGISTER_AB(MD_TXD),
+	REGISTER_AB(MD_RXD),
+	REGISTER_AB(MD_CS),
+	REGISTER_AB(MD_PHY_ADR),
+	REGISTER_AB(MD_ID),
+	/* MD_STAT is RC */
+	REGISTER_AB(MAC_STAT_DMA),
+	REGISTER_AB(MAC_CTRL),
+	REGISTER_BB(GEN_MODE),
+	REGISTER_AB(MAC_MC_HASH_REG0),
+	REGISTER_AB(MAC_MC_HASH_REG1),
+	REGISTER_AB(GM_CFG1),
+	REGISTER_AB(GM_CFG2),
+	/* GM_IPG and GM_HD are not used */
+	REGISTER_AB(GM_MAX_FLEN),
+	/* GM_TEST is not used */
+	REGISTER_AB(GM_ADR1),
+	REGISTER_AB(GM_ADR2),
+	REGISTER_AB(GMF_CFG0),
+	REGISTER_AB(GMF_CFG1),
+	REGISTER_AB(GMF_CFG2),
+	REGISTER_AB(GMF_CFG3),
+	REGISTER_AB(GMF_CFG4),
+	REGISTER_AB(GMF_CFG5),
+	REGISTER_BB(TX_SRC_MAC_CTL),
+	REGISTER_AB(XM_ADR_LO),
+	REGISTER_AB(XM_ADR_HI),
+	REGISTER_AB(XM_GLB_CFG),
+	REGISTER_AB(XM_TX_CFG),
+	REGISTER_AB(XM_RX_CFG),
+	REGISTER_AB(XM_MGT_INT_MASK),
+	REGISTER_AB(XM_FC),
+	REGISTER_AB(XM_PAUSE_TIME),
+	REGISTER_AB(XM_TX_PARAM),
+	REGISTER_AB(XM_RX_PARAM),
+	/* XM_MGT_INT_MSK (note no 'A') is RC */
+	REGISTER_AB(XX_PWR_RST),
+	REGISTER_AB(XX_SD_CTL),
+	REGISTER_AB(XX_TXDRV_CTL),
+	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
+	/* XX_CORE_STAT is partly RC */
+	REGISTER_DZ(BIU_HW_REV_ID),
+	REGISTER_DZ(MC_DB_LWRD),
+	REGISTER_DZ(MC_DB_HWRD),
+};
+
+struct efx_nic_reg_table {
+	u32 offset:24;
+	u32 min_revision:3, max_revision:3;
+	u32 step:6, rows:21;
+};
+
+#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
+	offset,								\
+	REGISTER_REVISION_ ## arch ## min_rev,				\
+	REGISTER_REVISION_ ## arch ## max_rev,				\
+	step, rows							\
+}
+#define REGISTER_TABLE(name, arch, min_rev, max_rev)			\
+	REGISTER_TABLE_DIMENSIONS(					\
+		name, arch ## R_ ## min_rev ## max_rev ## _ ## name,	\
+		arch, min_rev, max_rev,					\
+		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP,	\
+		arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
+#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
+#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
+#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
+#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
+#define REGISTER_TABLE_BB_CZ(name)					\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B,	\
+				  FR_BZ_ ## name ## _STEP,		\
+				  FR_BB_ ## name ## _ROWS),		\
+	REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z,	\
+				  FR_BZ_ ## name ## _STEP,		\
+				  FR_CZ_ ## name ## _ROWS)
+#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
+#define REGISTER_TABLE_DZ(name) REGISTER_TABLE(name, E, D, Z)
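+/* For example, REGISTER_TABLE_CZ(MC_TREG_SMEM) expands to an entry built
+ * from FR_CZ_MC_TREG_SMEM, FR_CZ_MC_TREG_SMEM_STEP and
+ * FR_CZ_MC_TREG_SMEM_ROWS, valid for revisions C to Z.
+ */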
+
+static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
+	/* DRIVER is not used */
+	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
+	REGISTER_TABLE_BB(TX_IPFIL_TBL),
+	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
+	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
+	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
+	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
+	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
+	/* We can't reasonably read all of the buffer table (up to 8MB!).
+	 * However this driver will only use a few entries.  Reading
+	 * 1K entries allows for some expansion of queue count and
+	 * size before we need to change the version. */
+	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
+				  F, A, A, 8, 1024),
+	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
+				  F, B, Z, 8, 1024),
+	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
+	REGISTER_TABLE_BB_CZ(TIMER_TBL),
+	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
+	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
+	/* TX_FILTER_TBL0 is huge and not used by this driver */
+	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
+	REGISTER_TABLE_CZ(MC_TREG_SMEM),
+	/* MSIX_PBA_TABLE is not mapped */
+	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
+	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
+	REGISTER_TABLE_DZ(BIU_MC_SFT_STATUS),
+};
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx)
+{
+	const struct efx_nic_reg *reg;
+	const struct efx_nic_reg_table *table;
+	size_t len = 0;
+
+	for (reg = efx_nic_regs;
+	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+	     reg++)
+		if (efx->type->revision >= reg->min_revision &&
+		    efx->type->revision <= reg->max_revision)
+			len += sizeof(efx_oword_t);
+
+	for (table = efx_nic_reg_tables;
+	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+	     table++)
+		if (efx->type->revision >= table->min_revision &&
+		    efx->type->revision <= table->max_revision)
+			len += table->rows * min_t(size_t, table->step, 16);
+
+	return len;
+}
+
+void efx_nic_get_regs(struct efx_nic *efx, void *buf)
+{
+	const struct efx_nic_reg *reg;
+	const struct efx_nic_reg_table *table;
+
+	for (reg = efx_nic_regs;
+	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
+	     reg++) {
+		if (efx->type->revision >= reg->min_revision &&
+		    efx->type->revision <= reg->max_revision) {
+			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
+			buf += sizeof(efx_oword_t);
+		}
+	}
+
+	for (table = efx_nic_reg_tables;
+	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
+	     table++) {
+		size_t size, i;
+
+		if (!(efx->type->revision >= table->min_revision &&
+		      efx->type->revision <= table->max_revision))
+			continue;
+
+		size = min_t(size_t, table->step, 16);
+
+		for (i = 0; i < table->rows; i++) {
+			switch (table->step) {
+			case 4: /* 32-bit SRAM */
+				efx_readd(efx, buf, table->offset + 4 * i);
+				break;
+			case 8: /* 64-bit SRAM */
+				efx_sram_readq(efx,
+					       efx->membase + table->offset,
+					       buf, i);
+				break;
+			case 16: /* 128-bit-readable register */
+				efx_reado_table(efx, buf, table->offset, i);
+				break;
+			case 32: /* 128-bit register, interleaved */
+				efx_reado_table(efx, buf, table->offset, 2 * i);
+				break;
+			default:
+				WARN_ON(1);
+				return;
+			}
+			buf += size;
+		}
+	}
+}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL.  The names are copied
+ *	starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
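+ *
+ * For example, with a @count of 4, a @mask with bits 0, 1 and 3 set and
+ * desc[1].name == %NULL, two names are copied out (when @names is
+ * non-%NULL) and 2 is returned.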
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			      const unsigned long *mask, u8 *names)
+{
+	size_t visible = 0;
+	size_t index;
+
+	for_each_set_bit(index, mask, count) {
+		if (desc[index].name) {
+			if (names) {
+				strlcpy(names, desc[index].name,
+					ETH_GSTRING_LEN);
+				names += ETH_GSTRING_LEN;
+			}
+			++visible;
+		}
+	}
+
+	return visible;
+}
+
+/**
+ * efx_nic_copy_stats - Copy stats from the DMA buffer into an
+ *	intermediate buffer. This is used to get a consistent
+ *	set of stats while the DMA buffer can be written at any time
+ *	by the NIC.
+ * @efx: The associated NIC.
+ * @dest: Destination buffer. Must be the same size as the DMA buffer.
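+ *
+ * Consistency is checked using the generation words in the DMA buffer:
+ * the end generation is read before the copy and the start generation
+ * after it, so a mismatch means the NIC updated the buffer during the
+ * memcpy and the copy is retried.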
+ */
+int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest)
+{
+	__le64 *dma_stats = efx->stats_buffer.addr;
+	__le64 generation_start, generation_end;
+	int rc = 0, retry;
+
+	if (!dest)
+		return 0;
+
+	if (!dma_stats)
+		goto return_zeroes;
+
+	/* If we're unlucky enough to read statistics during the DMA, wait
+	 * up to 10ms for it to finish (typically takes <500us)
+	 */
+	for (retry = 0; retry < 100; ++retry) {
+		generation_end = dma_stats[efx->num_mac_stats - 1];
+		if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+			goto return_zeroes;
+		rmb();
+		memcpy(dest, dma_stats, efx->num_mac_stats * sizeof(__le64));
+		rmb();
+		generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+		if (generation_end == generation_start)
+			return 0; /* return good data */
+		udelay(100);
+	}
+
+	rc = -EIO;
+
+return_zeroes:
+	memset(dest, 0, efx->num_mac_stats * sizeof(u64));
+	return rc;
+}
+
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ *	layout.  DMA widths of 0, 16, 32 and 64 are supported; where
+ *	the width is specified as 0 the corresponding element of
+ *	@stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics.  The length
+ *	of this array must be at least @count.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ *	directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			  const unsigned long *mask,
+			  u64 *stats, const void *dma_buf, bool accumulate)
+{
+	size_t index;
+
+	for_each_set_bit(index, mask, count) {
+		if (desc[index].dma_width) {
+			const void *addr = dma_buf + desc[index].offset;
+			u64 val;
+
+			switch (desc[index].dma_width) {
+			case 16:
+				val = le16_to_cpup((__le16 *)addr);
+				break;
+			case 32:
+				val = le32_to_cpup((__le32 *)addr);
+				break;
+			case 64:
+				val = le64_to_cpup((__le64 *)addr);
+				break;
+			default:
+				WARN_ON(1);
+				val = 0;
+				break;
+			}
+
+			if (accumulate)
+				stats[index] += val;
+			else
+				stats[index] = val;
+		}
+	}
+}
+
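+/* Adjust the RX no-descriptor drop count so that drops which occurred
+ * while the interface was down are excluded from the reported value.
+ */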
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
+{
+	/* if down, or this is the first update after coming up */
+	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
+		efx->rx_nodesc_drops_while_down +=
+			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
+	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
+	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
+	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
+}
diff --git a/drivers/net/ethernet/sfc/siena/nic.h b/drivers/net/ethernet/sfc/siena/nic.h
new file mode 100644
index 000000000000..251868235ae4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_NIC_H
+#define EFX_NIC_H
+
+#include "nic_common.h"
+#include "efx.h"
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx);
+
+enum {
+	PHY_TYPE_NONE = 0,
+	PHY_TYPE_TXC43128 = 1,
+	PHY_TYPE_88E1111 = 2,
+	PHY_TYPE_SFX7101 = 3,
+	PHY_TYPE_QT2022C2 = 4,
+	PHY_TYPE_PM8358 = 6,
+	PHY_TYPE_SFT9001A = 8,
+	PHY_TYPE_QT2025C = 9,
+	PHY_TYPE_SFT9001B = 10,
+};
+
+enum {
+	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
+	SIENA_STAT_tx_good_bytes,
+	SIENA_STAT_tx_bad_bytes,
+	SIENA_STAT_tx_packets,
+	SIENA_STAT_tx_bad,
+	SIENA_STAT_tx_pause,
+	SIENA_STAT_tx_control,
+	SIENA_STAT_tx_unicast,
+	SIENA_STAT_tx_multicast,
+	SIENA_STAT_tx_broadcast,
+	SIENA_STAT_tx_lt64,
+	SIENA_STAT_tx_64,
+	SIENA_STAT_tx_65_to_127,
+	SIENA_STAT_tx_128_to_255,
+	SIENA_STAT_tx_256_to_511,
+	SIENA_STAT_tx_512_to_1023,
+	SIENA_STAT_tx_1024_to_15xx,
+	SIENA_STAT_tx_15xx_to_jumbo,
+	SIENA_STAT_tx_gtjumbo,
+	SIENA_STAT_tx_collision,
+	SIENA_STAT_tx_single_collision,
+	SIENA_STAT_tx_multiple_collision,
+	SIENA_STAT_tx_excessive_collision,
+	SIENA_STAT_tx_deferred,
+	SIENA_STAT_tx_late_collision,
+	SIENA_STAT_tx_excessive_deferred,
+	SIENA_STAT_tx_non_tcpudp,
+	SIENA_STAT_tx_mac_src_error,
+	SIENA_STAT_tx_ip_src_error,
+	SIENA_STAT_rx_bytes,
+	SIENA_STAT_rx_good_bytes,
+	SIENA_STAT_rx_bad_bytes,
+	SIENA_STAT_rx_packets,
+	SIENA_STAT_rx_good,
+	SIENA_STAT_rx_bad,
+	SIENA_STAT_rx_pause,
+	SIENA_STAT_rx_control,
+	SIENA_STAT_rx_unicast,
+	SIENA_STAT_rx_multicast,
+	SIENA_STAT_rx_broadcast,
+	SIENA_STAT_rx_lt64,
+	SIENA_STAT_rx_64,
+	SIENA_STAT_rx_65_to_127,
+	SIENA_STAT_rx_128_to_255,
+	SIENA_STAT_rx_256_to_511,
+	SIENA_STAT_rx_512_to_1023,
+	SIENA_STAT_rx_1024_to_15xx,
+	SIENA_STAT_rx_15xx_to_jumbo,
+	SIENA_STAT_rx_gtjumbo,
+	SIENA_STAT_rx_bad_gtjumbo,
+	SIENA_STAT_rx_overflow,
+	SIENA_STAT_rx_false_carrier,
+	SIENA_STAT_rx_symbol_error,
+	SIENA_STAT_rx_align_error,
+	SIENA_STAT_rx_length_error,
+	SIENA_STAT_rx_internal_error,
+	SIENA_STAT_rx_nodesc_drop_cnt,
+	SIENA_STAT_COUNT
+};
+
+/**
+ * struct siena_nic_data - Siena NIC state
+ * @efx: Pointer back to main interface structure
+ * @wol_filter_id: Wake-on-LAN packet filter id
+ * @stats: Hardware statistics
+ * @vf: Array of &struct siena_vf objects
+ * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
+ * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
+ * @local_addr_list: List of local addresses. Protected by @local_lock.
+ * @local_page_list: List of DMA addressable pages used to broadcast
+ *	@local_addr_list. Protected by @local_lock.
+ * @local_lock: Mutex protecting @local_addr_list and @local_page_list.
+ * @peer_work: Work item to broadcast peer addresses to VMs.
+ */
+struct siena_nic_data {
+	struct efx_nic *efx;
+	int wol_filter_id;
+	u64 stats[SIENA_STAT_COUNT];
+#ifdef CONFIG_SFC_SRIOV
+	struct siena_vf *vf;
+	struct efx_channel *vfdi_channel;
+	unsigned vf_buftbl_base;
+	struct efx_buffer vfdi_status;
+	struct list_head local_addr_list;
+	struct list_head local_page_list;
+	struct mutex local_lock;
+	struct work_struct peer_work;
+#endif
+};
+
+enum {
+	EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+	EF10_STAT_port_tx_packets,
+	EF10_STAT_port_tx_pause,
+	EF10_STAT_port_tx_control,
+	EF10_STAT_port_tx_unicast,
+	EF10_STAT_port_tx_multicast,
+	EF10_STAT_port_tx_broadcast,
+	EF10_STAT_port_tx_lt64,
+	EF10_STAT_port_tx_64,
+	EF10_STAT_port_tx_65_to_127,
+	EF10_STAT_port_tx_128_to_255,
+	EF10_STAT_port_tx_256_to_511,
+	EF10_STAT_port_tx_512_to_1023,
+	EF10_STAT_port_tx_1024_to_15xx,
+	EF10_STAT_port_tx_15xx_to_jumbo,
+	EF10_STAT_port_rx_bytes,
+	EF10_STAT_port_rx_bytes_minus_good_bytes,
+	EF10_STAT_port_rx_good_bytes,
+	EF10_STAT_port_rx_bad_bytes,
+	EF10_STAT_port_rx_packets,
+	EF10_STAT_port_rx_good,
+	EF10_STAT_port_rx_bad,
+	EF10_STAT_port_rx_pause,
+	EF10_STAT_port_rx_control,
+	EF10_STAT_port_rx_unicast,
+	EF10_STAT_port_rx_multicast,
+	EF10_STAT_port_rx_broadcast,
+	EF10_STAT_port_rx_lt64,
+	EF10_STAT_port_rx_64,
+	EF10_STAT_port_rx_65_to_127,
+	EF10_STAT_port_rx_128_to_255,
+	EF10_STAT_port_rx_256_to_511,
+	EF10_STAT_port_rx_512_to_1023,
+	EF10_STAT_port_rx_1024_to_15xx,
+	EF10_STAT_port_rx_15xx_to_jumbo,
+	EF10_STAT_port_rx_gtjumbo,
+	EF10_STAT_port_rx_bad_gtjumbo,
+	EF10_STAT_port_rx_overflow,
+	EF10_STAT_port_rx_align_error,
+	EF10_STAT_port_rx_length_error,
+	EF10_STAT_port_rx_nodesc_drops,
+	EF10_STAT_port_rx_pm_trunc_bb_overflow,
+	EF10_STAT_port_rx_pm_discard_bb_overflow,
+	EF10_STAT_port_rx_pm_trunc_vfifo_full,
+	EF10_STAT_port_rx_pm_discard_vfifo_full,
+	EF10_STAT_port_rx_pm_trunc_qbb,
+	EF10_STAT_port_rx_pm_discard_qbb,
+	EF10_STAT_port_rx_pm_discard_mapping,
+	EF10_STAT_port_rx_dp_q_disabled_packets,
+	EF10_STAT_port_rx_dp_di_dropped_packets,
+	EF10_STAT_port_rx_dp_streaming_packets,
+	EF10_STAT_port_rx_dp_hlb_fetch,
+	EF10_STAT_port_rx_dp_hlb_wait,
+	EF10_STAT_rx_unicast,
+	EF10_STAT_rx_unicast_bytes,
+	EF10_STAT_rx_multicast,
+	EF10_STAT_rx_multicast_bytes,
+	EF10_STAT_rx_broadcast,
+	EF10_STAT_rx_broadcast_bytes,
+	EF10_STAT_rx_bad,
+	EF10_STAT_rx_bad_bytes,
+	EF10_STAT_rx_overflow,
+	EF10_STAT_tx_unicast,
+	EF10_STAT_tx_unicast_bytes,
+	EF10_STAT_tx_multicast,
+	EF10_STAT_tx_multicast_bytes,
+	EF10_STAT_tx_broadcast,
+	EF10_STAT_tx_broadcast_bytes,
+	EF10_STAT_tx_bad,
+	EF10_STAT_tx_bad_bytes,
+	EF10_STAT_tx_overflow,
+	EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_uncorrected_errors = EF10_STAT_V1_COUNT,
+	EF10_STAT_fec_corrected_errors,
+	EF10_STAT_fec_corrected_symbols_lane0,
+	EF10_STAT_fec_corrected_symbols_lane1,
+	EF10_STAT_fec_corrected_symbols_lane2,
+	EF10_STAT_fec_corrected_symbols_lane3,
+	EF10_STAT_ctpio_vi_busy_fallback,
+	EF10_STAT_ctpio_long_write_success,
+	EF10_STAT_ctpio_missing_dbell_fail,
+	EF10_STAT_ctpio_overflow_fail,
+	EF10_STAT_ctpio_underflow_fail,
+	EF10_STAT_ctpio_timeout_fail,
+	EF10_STAT_ctpio_noncontig_wr_fail,
+	EF10_STAT_ctpio_frm_clobber_fail,
+	EF10_STAT_ctpio_invalid_wr_fail,
+	EF10_STAT_ctpio_vi_clobber_fallback,
+	EF10_STAT_ctpio_unqualified_fallback,
+	EF10_STAT_ctpio_runt_fallback,
+	EF10_STAT_ctpio_success,
+	EF10_STAT_ctpio_fallback,
+	EF10_STAT_ctpio_poison,
+	EF10_STAT_ctpio_erase,
+	EF10_STAT_COUNT
+};
+
+/* Maximum number of TX PIO buffers we may allocate to a function.
+ * This matches the total number of buffers on each SFC9100-family
+ * controller.
+ */
+#define EF10_TX_PIOBUF_COUNT 16
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @n_piobufs: Number of PIO buffers allocated to this function
+ * @wc_membase: Base address of write-combining mapping of the memory BAR
+ * @pio_write_base: Base address for writing PIO buffers
+ * @pio_write_vi_base: Relative VI number for @pio_write_base
+ * @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
+ * @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
+ *	reboot
+ * @mc_stats: Scratch buffer for converting statistics to the kernel's format
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @workaround_26807: Flag: firmware supports workaround for bug 26807
+ * @workaround_61265: Flag: firmware supports workaround for bug 61265
+ * @must_check_datapath_caps: Flag: @datapath_caps needs to be revalidated
+ *	after MC reboot
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ *	%MC_CMD_GET_CAPABILITIES response)
+ * @datapath_caps2: Further capabilities of datapath firmware (FLAGS2 field of
+ *	%MC_CMD_GET_CAPABILITIES response)
+ * @rx_dpcpu_fw_id: Firmware ID of the RxDPCPU
+ * @tx_dpcpu_fw_id: Firmware ID of the TxDPCPU
+ * @must_probe_vswitching: Flag: vswitching has yet to be setup after MC reboot
+ * @pf_index: The number for this PF, or the parent PF if this is a VF
+#ifdef CONFIG_SFC_SRIOV
+ * @vf: Pointer to VF data structure
+#endif
+ * @vport_mac: The MAC address on the vport; only set for PFs (zero for VFs)
+ * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
+ * @vlan_lock: Lock to serialize access to vlan_list.
+ * @udp_tunnels: UDP tunnel port numbers and types.
+ * @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
+ *	@udp_tunnels to hardware and thus the push must be re-done.
+ * @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
+ */
+struct efx_ef10_nic_data {
+	struct efx_buffer mcdi_buf;
+	u16 warm_boot_count;
+	unsigned int vi_base;
+	unsigned int n_allocated_vis;
+	unsigned int n_piobufs;
+	void __iomem *wc_membase, *pio_write_base;
+	unsigned int pio_write_vi_base;
+	unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+	u16 piobuf_size;
+	bool must_restore_piobufs;
+	__le64 *mc_stats;
+	u64 stats[EF10_STAT_COUNT];
+	bool workaround_35388;
+	bool workaround_26807;
+	bool workaround_61265;
+	bool must_check_datapath_caps;
+	u32 datapath_caps;
+	u32 datapath_caps2;
+	unsigned int rx_dpcpu_fw_id;
+	unsigned int tx_dpcpu_fw_id;
+	bool must_probe_vswitching;
+	unsigned int pf_index;
+	u8 port_id[ETH_ALEN];
+#ifdef CONFIG_SFC_SRIOV
+	unsigned int vf_index;
+	struct ef10_vf *vf;
+#endif
+	u8 vport_mac[ETH_ALEN];
+	struct list_head vlan_list;
+	struct mutex vlan_lock;
+	struct efx_udp_tunnel udp_tunnels[16];
+	bool udp_tunnels_dirty;
+	struct mutex udp_tunnels_lock;
+	u64 licensed_features;
+};
+
+/* TSOv2 */
+int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+			 bool *data_mapped);
+
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_vf_nic_type;
+
+int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
+
+/* Falcon/Siena queue operations */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
+				    dma_addr_t dma_addr, unsigned int len);
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+int efx_farch_ev_probe(struct efx_channel *channel);
+int efx_farch_ev_init(struct efx_channel *channel);
+void efx_farch_ev_fini(struct efx_channel *channel);
+void efx_farch_ev_remove(struct efx_channel *channel);
+int efx_farch_ev_process(struct efx_channel *channel, int quota);
+void efx_farch_ev_read_ack(struct efx_channel *channel);
+void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+int efx_farch_filter_table_probe(struct efx_nic *efx);
+void efx_farch_filter_table_restore(struct efx_nic *efx);
+void efx_farch_filter_table_remove(struct efx_nic *efx);
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
+			    bool replace);
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+				 enum efx_filter_priority priority,
+				 u32 filter_id);
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+			      enum efx_filter_priority priority, u32 filter_id,
+			      struct efx_filter_spec *);
+int efx_farch_filter_clear_rx(struct efx_nic *efx,
+			      enum efx_filter_priority priority);
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+				   enum efx_filter_priority priority);
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+				enum efx_filter_priority priority, u32 *buf,
+				u32 size);
+#ifdef CONFIG_RFS_ACCEL
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+				     unsigned int index);
+#endif
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+void efx_farch_irq_enable_master(struct efx_nic *efx);
+int efx_farch_irq_test_generate(struct efx_nic *efx);
+void efx_farch_irq_disable_master(struct efx_nic *efx);
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
+
+/* Global Resources */
+void siena_prepare_flush(struct efx_nic *efx);
+int efx_farch_fini_dmaq(struct efx_nic *efx);
+void efx_farch_finish_flr(struct efx_nic *efx);
+void siena_finish_flush(struct efx_nic *efx);
+void falcon_start_nic_stats(struct efx_nic *efx);
+void falcon_stop_nic_stats(struct efx_nic *efx);
+int falcon_reset_xaui(struct efx_nic *efx);
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+void efx_farch_init_common(struct efx_nic *efx);
+void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
+
+/* Tests */
+struct efx_farch_register_test {
+	unsigned address;
+	efx_oword_t mask;
+};
+
+int efx_farch_test_registers(struct efx_nic *efx,
+			     const struct efx_farch_register_test *regs,
+			     size_t n_regs);
+
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+			      efx_qword_t *event);
+
+#endif /* EFX_NIC_H */
diff --git a/drivers/net/ethernet/sfc/siena/nic_common.h b/drivers/net/ethernet/sfc/siena/nic_common.h
new file mode 100644
index 000000000000..0cef35c0c559
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/nic_common.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_NIC_COMMON_H
+#define EFX_NIC_COMMON_H
+
+#include "net_driver.h"
+#include "efx_common.h"
+#include "mcdi.h"
+#include "ptp.h"
+
+enum {
+	/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
+	 * They are not supported by this driver but these revision numbers
+	 * form part of the ethtool API for register dumping.
+	 */
+	EFX_REV_SIENA_A0 = 3,
+	EFX_REV_HUNT_A0 = 4,
+	EFX_REV_EF100 = 5,
+};
+
+static inline int efx_nic_rev(struct efx_nic *efx)
+{
+	return efx->type->revision;
+}
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+				     unsigned int index)
+{
+	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+		(index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones.  We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords.  This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+		  EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Report whether this TX queue would be empty for the given write_count.
+ * May return false negative.
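+ *
+ * empty_read_count records the count at which the queue was last seen
+ * empty, with a flag bit (EFX_EMPTY_COUNT_VALID) set so that zero can
+ * mean "never seen empty"; masking that flag out, the queue is reported
+ * empty only if write_count still equals the recorded value.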
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
+
+	if (empty_read_count == 0)
+		return false;
+
+	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
+
+int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+			bool *data_mapped);
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell.  This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless.  Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ * We use the write_count used for the last doorbell push, to get the
+ * NIC's view of the tx queue.
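+ *
+ * For example, the first descriptor added after an idle period is pushed
+ * inline: the queue was empty as of the last doorbell and write_count
+ * has advanced by exactly one since then.  Adding several descriptors in
+ * one batch only rings the doorbell.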
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+					    unsigned int write_count)
+{
+	bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
+
+	tx_queue->empty_read_count = 0;
+	return was_empty && tx_queue->write_count - write_count == 1;
+}
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
+/* Alignment of PCIe DMA boundaries (4KB) */
+#define EFX_PAGE_SIZE	4096
+/* Size and alignment of buffer table entries (same) */
+#define EFX_BUF_SIZE	EFX_PAGE_SIZE
+
+/* NIC-generic software stats */
+enum {
+	GENERIC_STAT_rx_noskb_drops,
+	GENERIC_STAT_rx_nodesc_trunc,
+	GENERIC_STAT_COUNT
+};
+
+#define EFX_GENERIC_SW_STAT(ext_name)				\
+	[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+/* TX data path */
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+	return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+	tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+	if (tx_queue->efx->type->tx_remove)
+		tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+	tx_queue->efx->type->tx_write(tx_queue);
+}
+
+/* RX data path */
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+	return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+	rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+	rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+	rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+	rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
+
+/* Event data path */
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+	return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+	return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+	channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+	channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+	return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+	channel->efx->type->ev_read_ack(channel);
+}
+
+void efx_nic_event_test_start(struct efx_channel *channel);
+
+bool efx_nic_event_present(struct efx_channel *channel);
+
+static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	if (efx->type->sensor_event)
+		efx->type->sensor_event(efx, ev);
+}
+
+static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
+{
+	return efx->type->rx_recycle_ring_size(efx);
+}
+
+/* Some statistics are computed as A - B where A and B each increase
+ * linearly with some hardware counter(s) and the counters are read
+ * asynchronously.  If the counters contributing to B are always read
+ * after those contributing to A, the computed value may be lower than
+ * the true value by some variable amount, and may decrease between
+ * subsequent computations.
+ *
+ * We should never allow statistics to decrease or to exceed the true
+ * value.  Since the computed value will never be greater than the
+ * true value, we can achieve this by only storing the computed value
+ * when it increases.
+ */
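+/* Illustrative example: if *stat is currently 100 and a later computation
+ * of A - B yields 97 (because B's counters were read later than A's), the
+ * signed difference (s64)(97 - 100) is negative and the stored value stays
+ * at 100; a subsequent computation of 105 is stored.
+ */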
+static inline void efx_update_diff_stat(u64 *stat, u64 diff)
+{
+	if ((s64)(diff - *stat) > 0)
+		*stat = diff;
+}
+
+/* Interrupts */
+int efx_nic_init_interrupt(struct efx_nic *efx);
+int efx_nic_irq_test_start(struct efx_nic *efx);
+void efx_nic_fini_interrupt(struct efx_nic *efx);
+
+static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
+{
+	return READ_ONCE(channel->event_test_cpu);
+}
+static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
+{
+	return READ_ONCE(efx->last_irq_cpu);
+}
+
+/* Global Resources */
+int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
+			 unsigned int len, gfp_t gfp_flags);
+void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
+
+size_t efx_nic_get_regs_len(struct efx_nic *efx);
+void efx_nic_get_regs(struct efx_nic *efx, void *buf);
+
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			      const unsigned long *mask, u8 *names);
+int efx_nic_copy_stats(struct efx_nic *efx, __le64 *dest);
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+			  const unsigned long *mask, u64 *stats,
+			  const void *dma_buf, bool accumulate);
+void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
+						 struct rtnl_link_stats64 *core_stats)
+{
+	if (efx->type->update_stats_atomic)
+		return efx->type->update_stats_atomic(efx, full_stats, core_stats);
+	return efx->type->update_stats(efx, full_stats, core_stats);
+}
+
+#define EFX_MAX_FLUSH_TIME 5000
+
+#endif /* EFX_NIC_COMMON_H */
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
new file mode 100644
index 000000000000..f0ef515e2ade
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -0,0 +1,2210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
+ */
+
+/* Theory of operation:
+ *
+ * PTP support is assisted by firmware running on the MC, which provides
+ * the hardware timestamping capabilities.  Both transmitted and received
+ * PTP event packets are queued onto internal queues for subsequent processing;
+ * this is because the MC operations are relatively long and would block
+ * NAPI/interrupt operation.
+ *
+ * Receive event processing:
+ *	The event contains the packet's UUID and sequence number, together
+ *	with the hardware timestamp.  The PTP receive packet queue is searched
+ *	for this UUID/sequence number and, if found, put on a pending queue.
+ *	Packets not matching are delivered without timestamps (MCDI events will
+ *	always arrive after the actual packet).
+ *	It is important for the operation of the PTP protocol that the ordering
+ *	of packets between the event and general port is maintained.
+ *
+ * Work queue processing:
+ *	If work is waiting, synchronise host/hardware time
+ *
+ *	Transmit: send packet through MC, which returns the transmission time
+ *	that is converted to an appropriate timestamp.
+ *
+ *	Receive: the packet's reception time is converted to an appropriate
+ *	timestamp.
+ */
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/pps_kernel.h>
+#include <linux/ptp_clock_kernel.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "io.h"
+#include "farch_regs.h"
+#include "tx.h"
+#include "nic.h" /* indirectly includes ptp.h */
+
+/* Maximum number of events expected to make up a PTP event */
+#define	MAX_EVENT_FRAGS			3
+
+/* Maximum delay, ms, to begin synchronisation */
+#define	MAX_SYNCHRONISE_WAIT_MS		2
+
+/* How long, at most, to spend synchronising */
+#define	SYNCHRONISE_PERIOD_NS		250000
+
+/* How often to update the shared memory time */
+#define	SYNCHRONISATION_GRANULARITY_NS	200
+
+/* Minimum permitted length of a (corrected) synchronisation time */
+#define	DEFAULT_MIN_SYNCHRONISATION_NS	120
+
+/* Maximum permitted length of a (corrected) synchronisation time */
+#define	MAX_SYNCHRONISATION_NS		1000
+
+/* How many (MC) receive events can be queued */
+#define	MAX_RECEIVE_EVENTS		8
+
+/* Length of (modified) moving average. */
+#define	AVERAGE_LENGTH			16
+
+/* How long an unmatched event or packet can be held */
+#define PKT_EVENT_LIFETIME_MS		10
+
+/* Offsets into PTP packet for identification.  These offsets are from the
+ * start of the IP header, not the MAC header.  Note that neither PTP V1 nor
+ * PTP V2 permit the use of IPV4 options.
+ */
+#define PTP_DPORT_OFFSET	22
+
+#define PTP_V1_VERSION_LENGTH	2
+#define PTP_V1_VERSION_OFFSET	28
+
+#define PTP_V1_UUID_LENGTH	6
+#define PTP_V1_UUID_OFFSET	50
+
+#define PTP_V1_SEQUENCE_LENGTH	2
+#define PTP_V1_SEQUENCE_OFFSET	58
+
+/* The minimum length of a PTP V1 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define	PTP_V1_MIN_LENGTH	64
+
+#define PTP_V2_VERSION_LENGTH	1
+#define PTP_V2_VERSION_OFFSET	29
+
+#define PTP_V2_UUID_LENGTH	8
+#define PTP_V2_UUID_OFFSET	48
+
+/* Although PTP V2 UUIDs comprise a ClockIdentity (8) and PortNumber (2),
+ * the MC only captures the last six bytes of the clock identity. These values
+ * reflect those, not the ones used in the standard.  The standard permits
+ * mapping of V1 UUIDs to V2 UUIDs with these same values.
+ */
+#define PTP_V2_MC_UUID_LENGTH	6
+#define PTP_V2_MC_UUID_OFFSET	50
+
+#define PTP_V2_SEQUENCE_LENGTH	2
+#define PTP_V2_SEQUENCE_OFFSET	58
+
+/* The minimum length of a PTP V2 packet for offsets, etc. to be valid:
+ * includes IP header.
+ */
+#define	PTP_V2_MIN_LENGTH	63
+
+#define	PTP_MIN_LENGTH		63
+
+#define PTP_ADDRESS		0xe0000181	/* 224.0.1.129 */
+#define PTP_EVENT_PORT		319
+#define PTP_GENERAL_PORT	320
+
+/* Annoyingly the format of the version numbers is different between
+ * versions 1 and 2, so it isn't possible to simply look for 1 or 2.
+ */
+#define	PTP_VERSION_V1		1
+
+#define	PTP_VERSION_V2		2
+#define	PTP_VERSION_V2_MASK	0x0f
+
+enum ptp_packet_state {
+	PTP_PACKET_STATE_UNMATCHED = 0,
+	PTP_PACKET_STATE_MATCHED,
+	PTP_PACKET_STATE_TIMED_OUT,
+	PTP_PACKET_STATE_MATCH_UNWANTED
+};
+
+/* NIC synchronised with single word of time only comprising
+ * partial seconds and full nanoseconds: 10^9 ~ 2^30 so 2 bits for seconds.
+ */
+#define	MC_NANOSECOND_BITS	30
+#define	MC_NANOSECOND_MASK	((1 << MC_NANOSECOND_BITS) - 1)
+#define	MC_SECOND_MASK		((1 << (32 - MC_NANOSECOND_BITS)) - 1)
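+/* e.g. efx_ptp_send_times() packs the host time as
+ * (tv_sec << MC_NANOSECOND_BITS) | tv_nsec, so the low 30 bits carry full
+ * nanoseconds (tv_nsec < 10^9 < 2^30) and the top 2 bits carry partial
+ * seconds.
+ */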
+
+/* Maximum parts-per-billion adjustment that is acceptable */
+#define MAX_PPB			1000000
+
+/* Precalculate scale word to avoid long long division at runtime */
+/* This is equivalent to 2^66 / 10^9. */
+#define PPB_SCALE_WORD  ((1LL << (57)) / 1953125LL)
+
+/* How much to shift down after scaling to convert to FP40 */
+#define PPB_SHIFT_FP40		26
+/* ... and FP44. */
+#define PPB_SHIFT_FP44		22
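+/* Scaling a parts-per-billion value by PPB_SCALE_WORD (2^66 / 10^9, since
+ * 10^9 = 2^9 * 1953125) yields ppb / 10^9 with 66 fractional bits; shifting
+ * right by PPB_SHIFT_FP40 or PPB_SHIFT_FP44 leaves 40 or 44 fractional bits
+ * respectively, matching the FP40/FP44 formats named above.
+ */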
+
+#define PTP_SYNC_ATTEMPTS	4
+
+/**
+ * struct efx_ptp_match - Matching structure, stored in sk_buff's cb area.
+ * @words: UUID and (partial) sequence number
+ * @expiry: Time after which the packet should be delivered irrespective of
+ *            event arrival.
+ * @state: The state of the packet - whether it is ready for processing or
+ *         whether that is of no interest.
+ */
+struct efx_ptp_match {
+	u32 words[DIV_ROUND_UP(PTP_V1_UUID_LENGTH, 4)];
+	unsigned long expiry;
+	enum ptp_packet_state state;
+};
+
+/**
+ * struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: list of events
+ * @seq0: First part of (PTP) UUID
+ * @seq1: Second part of (PTP) UUID and sequence number
+ * @hwtimestamp: Event timestamp
+ * @expiry: Time after which the event should be dropped
+ */
+struct efx_ptp_event_rx {
+	struct list_head link;
+	u32 seq0;
+	u32 seq1;
+	ktime_t hwtimestamp;
+	unsigned long expiry;
+};
+
+/**
+ * struct efx_ptp_timeset - Synchronisation between host and MC
+ * @host_start: Host time immediately before hardware timestamp taken
+ * @major: Hardware timestamp, major
+ * @minor: Hardware timestamp, minor
+ * @host_end: Host time immediately after hardware timestamp taken
+ * @wait: Number of NIC clock ticks between hardware timestamp being read and
+ *          host end time being seen
+ * @window: Difference of host_end and host_start
+ * @valid: Whether this timeset is valid
+ */
+struct efx_ptp_timeset {
+	u32 host_start;
+	u32 major;
+	u32 minor;
+	u32 host_end;
+	u32 wait;
+	u32 window;	/* Derived: end - start, allowing for wrap */
+};
+
+/**
+ * struct efx_ptp_data - Precision Time Protocol (PTP) state
+ * @efx: The NIC context
+ * @channel: The PTP channel (Siena only)
+ * @rx_ts_inline: Flag for whether RX timestamps are inline (else they are
+ *	separate events)
+ * @rxq: Receive SKB queue (awaiting timestamps)
+ * @txq: Transmit SKB queue
+ * @evt_list: List of MC receive events awaiting packets
+ * @evt_free_list: List of free events
+ * @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @rx_evts: Instantiated events (on evt_list and evt_free_list)
+ * @workwq: Work queue for processing pending PTP operations
+ * @work: Work task
+ * @reset_required: A serious error has occurred and the PTP task needs to be
+ *                  reset (disable, enable).
+ * @rxfilter_event: Receive filter for PTP event packets (UDP port 319)
+ * @rxfilter_general: Receive filter for PTP general packets (UDP port 320)
+ * @rxfilter_installed: Receive filter installed
+ * @config: Current timestamp configuration
+ * @enabled: PTP operation enabled
+ * @mode: Mode in which PTP is operating (PTP version)
+ * @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
+ * @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time: contains time details
+ * @nic_time.minor_max: Wrap point for NIC minor times
+ * @nic_time.sync_event_diff_min: Minimum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much earlier than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_diff_max: Maximum acceptable difference between time
+ * in packet prefix and last MCDI time sync event i.e. how much later than
+ * the last sync event time a packet timestamp can be.
+ * @nic_time.sync_event_minor_shift: Shift required to make minor time from
+ * field in MCDI time sync event.
+ * @min_synchronisation_ns: Minimum acceptable corrected sync window
+ * @capabilities: Capabilities flags from the NIC
+ * @ts_corrections: contains corrections details
+ * @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
+ *                         timestamps
+ * @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
+ *                         timestamps
+ * @ts_corrections.pps_out: PPS output error (information only)
+ * @ts_corrections.pps_in: Required driver correction of PPS input timestamps
+ * @ts_corrections.general_tx: Required driver correction of general packet
+ *                             transmit timestamps
+ * @ts_corrections.general_rx: Required driver correction of general packet
+ *                             receive timestamps
+ * @evt_frags: Partly assembled PTP events
+ * @evt_frag_idx: Current fragment number
+ * @evt_code: Last event code
+ * @start: Address at which MC indicates ready for synchronisation
+ * @host_time_pps: Host time at last PPS
+ * @adjfreq_ppb_shift: Shift required to convert scaled parts-per-billion
+ * frequency adjustment into a fixed point fractional nanosecond format.
+ * @current_adjfreq: Current ppb adjustment.
+ * @phc_clock: Pointer to registered phc device (if primary function)
+ * @phc_clock_info: Registration structure for phc device
+ * @pps_work: pps work task for handling pps events
+ * @pps_workwq: pps work queue
+ * @nic_ts_enabled: Flag indicating if NIC generated TS events are handled
+ * @txbuf: Buffer for use when transmitting (PTP) packets to MC (avoids
+ *         allocations in main data path).
+ * @good_syncs: Number of successful synchronisations.
+ * @fast_syncs: Number of synchronisations requiring short delay
+ * @bad_syncs: Number of failed synchronisations.
+ * @sync_timeouts: Number of synchronisation timeouts
+ * @no_time_syncs: Number of synchronisations with no good times.
+ * @invalid_sync_windows: Number of sync windows with bad durations.
+ * @undersize_sync_windows: Number of corrected sync windows that are too small
+ * @oversize_sync_windows: Number of corrected sync windows that are too large
+ * @rx_no_timestamp: Number of packets received without a timestamp.
+ * @timeset: Last set of synchronisation statistics.
+ * @xmit_skb: Transmit SKB function.
+ */
+struct efx_ptp_data {
+	struct efx_nic *efx;
+	struct efx_channel *channel;
+	bool rx_ts_inline;
+	struct sk_buff_head rxq;
+	struct sk_buff_head txq;
+	struct list_head evt_list;
+	struct list_head evt_free_list;
+	spinlock_t evt_lock;
+	struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
+	struct workqueue_struct *workwq;
+	struct work_struct work;
+	bool reset_required;
+	u32 rxfilter_event;
+	u32 rxfilter_general;
+	bool rxfilter_installed;
+	struct hwtstamp_config config;
+	bool enabled;
+	unsigned int mode;
+	void (*ns_to_nic_time)(s64 ns, u32 *nic_major, u32 *nic_minor);
+	ktime_t (*nic_to_kernel_time)(u32 nic_major, u32 nic_minor,
+				      s32 correction);
+	struct {
+		u32 minor_max;
+		u32 sync_event_diff_min;
+		u32 sync_event_diff_max;
+		unsigned int sync_event_minor_shift;
+	} nic_time;
+	unsigned int min_synchronisation_ns;
+	unsigned int capabilities;
+	struct {
+		s32 ptp_tx;
+		s32 ptp_rx;
+		s32 pps_out;
+		s32 pps_in;
+		s32 general_tx;
+		s32 general_rx;
+	} ts_corrections;
+	efx_qword_t evt_frags[MAX_EVENT_FRAGS];
+	int evt_frag_idx;
+	int evt_code;
+	struct efx_buffer start;
+	struct pps_event_time host_time_pps;
+	unsigned int adjfreq_ppb_shift;
+	s64 current_adjfreq;
+	struct ptp_clock *phc_clock;
+	struct ptp_clock_info phc_clock_info;
+	struct work_struct pps_work;
+	struct workqueue_struct *pps_workwq;
+	bool nic_ts_enabled;
+	efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)];
+
+	unsigned int good_syncs;
+	unsigned int fast_syncs;
+	unsigned int bad_syncs;
+	unsigned int sync_timeouts;
+	unsigned int no_time_syncs;
+	unsigned int invalid_sync_windows;
+	unsigned int undersize_sync_windows;
+	unsigned int oversize_sync_windows;
+	unsigned int rx_no_timestamp;
+	struct efx_ptp_timeset
+	timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
+	void (*xmit_skb)(struct efx_nic *efx, struct sk_buff *skb);
+};
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta);
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta);
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts);
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+			   const struct timespec64 *e_ts);
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *request, int on);
+
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx)
+{
+	return efx_has_cap(efx, TX_MAC_TIMESTAMPING);
+}
+
+/* PTP 'extra' channel is still a traffic channel, but we only create TX queues
+ * if PTP uses MAC TX timestamps, not if PTP uses the MC directly to transmit.
+ */
+static bool efx_ptp_want_txqs(struct efx_channel *channel)
+{
+	return efx_ptp_use_mac_tx_timestamps(channel->efx);
+}
+
+#define PTP_SW_STAT(ext_name, field_name)				\
+	{ #ext_name, 0, offsetof(struct efx_ptp_data, field_name) }
+#define PTP_MC_STAT(ext_name, mcdi_name)				\
+	{ #ext_name, 32, MC_CMD_PTP_OUT_STATUS_STATS_ ## mcdi_name ## _OFST }
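+/* For example, PTP_SW_STAT(ptp_good_syncs, good_syncs) expands to
+ * { "ptp_good_syncs", 0, offsetof(struct efx_ptp_data, good_syncs) }:
+ * a dma_width of zero marks a driver-maintained counter read directly from
+ * struct efx_ptp_data, while PTP_MC_STAT entries describe a 32-bit field at
+ * the given offset in the MCDI PTP status response.
+ */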
+static const struct efx_hw_stat_desc efx_ptp_stat_desc[] = {
+	PTP_SW_STAT(ptp_good_syncs, good_syncs),
+	PTP_SW_STAT(ptp_fast_syncs, fast_syncs),
+	PTP_SW_STAT(ptp_bad_syncs, bad_syncs),
+	PTP_SW_STAT(ptp_sync_timeouts, sync_timeouts),
+	PTP_SW_STAT(ptp_no_time_syncs, no_time_syncs),
+	PTP_SW_STAT(ptp_invalid_sync_windows, invalid_sync_windows),
+	PTP_SW_STAT(ptp_undersize_sync_windows, undersize_sync_windows),
+	PTP_SW_STAT(ptp_oversize_sync_windows, oversize_sync_windows),
+	PTP_SW_STAT(ptp_rx_no_timestamp, rx_no_timestamp),
+	PTP_MC_STAT(ptp_tx_timestamp_packets, TX),
+	PTP_MC_STAT(ptp_rx_timestamp_packets, RX),
+	PTP_MC_STAT(ptp_timestamp_packets, TS),
+	PTP_MC_STAT(ptp_filter_matches, FM),
+	PTP_MC_STAT(ptp_non_filter_matches, NFM),
+};
+#define PTP_STAT_COUNT ARRAY_SIZE(efx_ptp_stat_desc)
+static const unsigned long efx_ptp_stat_mask[] = {
+	[0 ... BITS_TO_LONGS(PTP_STAT_COUNT) - 1] = ~0UL,
+};
+
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings)
+{
+	if (!efx->ptp_data)
+		return 0;
+
+	return efx_nic_describe_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+				      efx_ptp_stat_mask, strings);
+}
+
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_STATUS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_STATUS_LEN);
+	size_t i;
+	int rc;
+
+	if (!efx->ptp_data)
+		return 0;
+
+	/* Copy software statistics */
+	for (i = 0; i < PTP_STAT_COUNT; i++) {
+		if (efx_ptp_stat_desc[i].dma_width)
+			continue;
+		stats[i] = *(unsigned int *)((char *)efx->ptp_data +
+					     efx_ptp_stat_desc[i].offset);
+	}
+
+	/* Fetch MC statistics.  We *must* fill in all statistics or
+	 * risk leaking kernel memory to userland, so if the MCDI
+	 * request fails we pretend we got zeroes.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_STATUS);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc)
+		memset(outbuf, 0, sizeof(outbuf));
+	efx_nic_update_stats(efx_ptp_stat_desc, PTP_STAT_COUNT,
+			     efx_ptp_stat_mask,
+			     stats, _MCDI_PTR(outbuf, 0), false);
+
+	return PTP_STAT_COUNT;
+}
+
+/* For Siena platforms NIC time is in seconds and nanoseconds */
+static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+	*nic_major = (u32)ts.tv_sec;
+	*nic_minor = ts.tv_nsec;
+}
+
+static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+						s32 correction)
+{
+	ktime_t kt = ktime_set(nic_major, nic_minor);
+	if (correction >= 0)
+		kt = ktime_add_ns(kt, (u64)correction);
+	else
+		kt = ktime_sub_ns(kt, (u64)-correction);
+	return kt;
+}
+
+/* To convert from s27 format to ns we multiply then divide by a power of 2.
+ * For the conversion from ns to s27, the operation is also converted to a
+ * multiply and shift.
+ */
+#define S27_TO_NS_SHIFT	(27)
+#define NS_TO_S27_MULT	(((1ULL << 63) + NSEC_PER_SEC / 2) / NSEC_PER_SEC)
+#define NS_TO_S27_SHIFT	(63 - S27_TO_NS_SHIFT)
+#define S27_MINOR_MAX	(1 << S27_TO_NS_SHIFT)
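+/* NS_TO_S27_MULT is 2^63 / 10^9 rounded to nearest, so
+ * (ns * NS_TO_S27_MULT) >> NS_TO_S27_SHIFT approximates ns * 2^27 / 10^9,
+ * i.e. nanoseconds expressed in units of 2^-27 s; the reverse conversion
+ * multiplies by NSEC_PER_SEC and shifts right by S27_TO_NS_SHIFT.
+ */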
+
+/* For Huntington platforms NIC time is in seconds and fractions of a second
+ * where the minor register only uses 27 bits in units of 2^-27s.
+ */
+static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+	u32 maj = (u32)ts.tv_sec;
+	u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
+			 (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
+
+	/* The conversion can result in the minor value exceeding the maximum.
+	 * In this case, round up to the next second.
+	 */
+	if (min >= S27_MINOR_MAX) {
+		min -= S27_MINOR_MAX;
+		maj++;
+	}
+
+	*nic_major = maj;
+	*nic_minor = min;
+}
+
+static inline ktime_t efx_ptp_s27_to_ktime(u32 nic_major, u32 nic_minor)
+{
+	u32 ns = (u32)(((u64)nic_minor * NSEC_PER_SEC +
+			(1ULL << (S27_TO_NS_SHIFT - 1))) >> S27_TO_NS_SHIFT);
+	return ktime_set(nic_major, ns);
+}
+
+static ktime_t efx_ptp_s27_to_ktime_correction(u32 nic_major, u32 nic_minor,
+					       s32 correction)
+{
+	/* Apply the correction and deal with carry */
+	nic_minor += correction;
+	if ((s32)nic_minor < 0) {
+		nic_minor += S27_MINOR_MAX;
+		nic_major--;
+	} else if (nic_minor >= S27_MINOR_MAX) {
+		nic_minor -= S27_MINOR_MAX;
+		nic_major++;
+	}
+
+	return efx_ptp_s27_to_ktime(nic_major, nic_minor);
+}
+
+/* For Medford2 platforms the time is in seconds and quarter nanoseconds. */
+static void efx_ptp_ns_to_s_qns(s64 ns, u32 *nic_major, u32 *nic_minor)
+{
+	struct timespec64 ts = ns_to_timespec64(ns);
+
+	*nic_major = (u32)ts.tv_sec;
+	*nic_minor = ts.tv_nsec * 4;
+}
+
+static ktime_t efx_ptp_s_qns_to_ktime_correction(u32 nic_major, u32 nic_minor,
+						 s32 correction)
+{
+	ktime_t kt;
+
+	nic_minor = DIV_ROUND_CLOSEST(nic_minor, 4);
+	correction = DIV_ROUND_CLOSEST(correction, 4);
+
+	kt = ktime_set(nic_major, nic_minor);
+
+	if (correction >= 0)
+		kt = ktime_add_ns(kt, (u64)correction);
+	else
+		kt = ktime_sub_ns(kt, (u64)-correction);
+	return kt;
+}
+
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx)
+{
+	return efx->ptp_data ? efx->ptp_data->channel : NULL;
+}
+
+static u32 last_sync_timestamp_major(struct efx_nic *efx)
+{
+	struct efx_channel *channel = efx_ptp_channel(efx);
+	u32 major = 0;
+
+	if (channel)
+		major = channel->sync_timestamp_major;
+	return major;
+}
+
+/* The 8000 series and later can provide the time from the MAC, which is only
+ * 48 bits long and provides meta-information in the top 2 bits.
+ */
+static ktime_t
+efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
+				    struct efx_ptp_data *ptp,
+				    u32 nic_major, u32 nic_minor,
+				    s32 correction)
+{
+	u32 sync_timestamp;
+	ktime_t kt = { 0 };
+	s16 delta;
+
+	if (!(nic_major & 0x80000000)) {
+		WARN_ON_ONCE(nic_major >> 16);
+
+		/* Medford provides 48 bits of timestamp, so we must get the top
+		 * 16 bits from the timesync event state.
+		 *
+		 * We only have the lower 16 bits of the time now, but we do
+		 * have a full resolution timestamp at some point in the past.
+		 * As
+		 * long as the difference between the (real) now and the sync
+		 * is less than 2^15, then we can reconstruct the difference
+		 * between those two numbers using only the lower 16 bits of
+		 * each.
+		 *
+		 * Put another way
+		 *
+		 * a - b = ((a mod k) - b) mod k
+		 *
+		 * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+		 * (a mod k) and b, so can calculate the delta, a - b.
+		 *
+		 */
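+		/* Illustrative example: if the last sync major time was
+		 * 0x12345 and the event carries nic_major = 0x2347, then
+		 * delta = (s16)(0x2347 - 0x12345) = 2 and the reconstructed
+		 * major time is 0x12347.
+		 */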
+		sync_timestamp = last_sync_timestamp_major(efx);
+
+		/* Because delta is s16 this does an implicit mask down to
+		 * 16 bits which is what we need, assuming
+		 * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+		 * we can deal with the (unlikely) case of sync timestamps
+		 * arriving from the future.
+		 */
+		delta = nic_major - sync_timestamp;
+
+		/* Recover the fully specified time now, by applying the offset
+		 * to the (fully specified) sync time.
+		 */
+		nic_major = sync_timestamp + delta;
+
+		kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
+					     correction);
+	}
+	return kt;
+}
+
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	ktime_t kt;
+
+	if (efx_ptp_use_mac_tx_timestamps(efx))
+		kt = efx_ptp_mac_nic_to_ktime_correction(efx, ptp,
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	else
+		kt = ptp->nic_to_kernel_time(
+				tx_queue->completed_timestamp_major,
+				tx_queue->completed_timestamp_minor,
+				ptp->ts_corrections.general_tx);
+	return kt;
+}
+
+/* Get PTP attributes and set up time conversions */
+static int efx_ptp_get_attributes(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_ATTRIBUTES_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN);
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int rc;
+	u32 fmt;
+	size_t out_len;
+
+	/* Get the PTP attributes. If the NIC doesn't support the operation we
+	 * use the default format for compatibility with older NICs i.e.
+	 * seconds and nanoseconds.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_GET_ATTRIBUTES);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &out_len);
+	if (rc == 0) {
+		fmt = MCDI_DWORD(outbuf, PTP_OUT_GET_ATTRIBUTES_TIME_FORMAT);
+	} else if (rc == -EINVAL) {
+		fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
+	} else if (rc == -EPERM) {
+		pci_info(efx->pci_dev, "no PTP support\n");
+		return rc;
+	} else {
+		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
+				       outbuf, sizeof(outbuf), rc);
+		return rc;
+	}
+
+	switch (fmt) {
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_27FRACTION:
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s27;
+		ptp->nic_to_kernel_time = efx_ptp_s27_to_ktime_correction;
+		ptp->nic_time.minor_max = 1 << 27;
+		ptp->nic_time.sync_event_minor_shift = 19;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS:
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s_ns;
+		ptp->nic_to_kernel_time = efx_ptp_s_ns_to_ktime_correction;
+		ptp->nic_time.minor_max = 1000000000;
+		ptp->nic_time.sync_event_minor_shift = 22;
+		break;
+	case MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_QTR_NANOSECONDS:
+		ptp->ns_to_nic_time = efx_ptp_ns_to_s_qns;
+		ptp->nic_to_kernel_time = efx_ptp_s_qns_to_ktime_correction;
+		ptp->nic_time.minor_max = 4000000000UL;
+		ptp->nic_time.sync_event_minor_shift = 24;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	/* Precalculate acceptable difference between the minor time in the
+	 * packet prefix and the last MCDI time sync event. We expect the
+	 * packet prefix timestamp to be after the sync event by up to one
+	 * sync event interval (0.25s), but we allow it to exceed this by a
+	 * fuzz factor of 0.1s.
+	 */
+	ptp->nic_time.sync_event_diff_min = ptp->nic_time.minor_max
+		- (ptp->nic_time.minor_max / 10);
+	ptp->nic_time.sync_event_diff_max = (ptp->nic_time.minor_max / 4)
+		+ (ptp->nic_time.minor_max / 10);
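+	/* For example, with the seconds + nanoseconds format
+	 * (minor_max = 10^9) this gives diff_min = 0.9s and diff_max = 0.35s,
+	 * expressed in minor-time units.
+	 */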
+
+	/* MC_CMD_PTP_OP_GET_ATTRIBUTES has been extended twice from an older
+	 * operation MC_CMD_PTP_OP_GET_TIME_FORMAT. The function now may return
+	 * a value to use for the minimum acceptable corrected synchronization
+	 * window and may return further capabilities.
+	 * If we have the extra information store it. For older firmware that
+	 * does not implement the extended command use the default value.
+	 */
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_CAPABILITIES_OFST)
+		ptp->min_synchronisation_ns =
+			MCDI_DWORD(outbuf,
+				   PTP_OUT_GET_ATTRIBUTES_SYNC_WINDOW_MIN);
+	else
+		ptp->min_synchronisation_ns = DEFAULT_MIN_SYNCHRONISATION_NS;
+
+	if (rc == 0 &&
+	    out_len >= MC_CMD_PTP_OUT_GET_ATTRIBUTES_LEN)
+		ptp->capabilities = MCDI_DWORD(outbuf,
+					PTP_OUT_GET_ATTRIBUTES_CAPABILITIES);
+	else
+		ptp->capabilities = 0;
+
+	/* Set up the shift for conversion between frequency
+	 * adjustments in parts-per-billion and the fixed-point
+	 * fractional ns format that the adapter uses.
+	 */
+	if (ptp->capabilities & (1 << MC_CMD_PTP_OUT_GET_ATTRIBUTES_FP44_FREQ_ADJ_LBN))
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP44;
+	else
+		ptp->adjfreq_ppb_shift = PPB_SHIFT_FP40;
+
+	return 0;
+}
+
+/* Get PTP timestamp corrections */
+static int efx_ptp_get_timestamp_corrections(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_GET_TIMESTAMP_CORRECTIONS_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN);
+	int rc;
+	size_t out_len;
+
+	/* Get the timestamp corrections from the NIC. If this operation is
+	 * not supported (older NICs) then no correction is required.
+	 */
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP,
+		       MC_CMD_PTP_OP_GET_TIMESTAMP_CORRECTIONS);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), &out_len);
+	if (rc == 0) {
+		efx->ptp_data->ts_corrections.ptp_tx = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT);
+		efx->ptp_data->ts_corrections.ptp_rx = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE);
+		efx->ptp_data->ts_corrections.pps_out = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT);
+		efx->ptp_data->ts_corrections.pps_in = MCDI_DWORD(outbuf,
+			PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN);
+
+		if (out_len >= MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN) {
+			efx->ptp_data->ts_corrections.general_tx = MCDI_DWORD(
+				outbuf,
+				PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX);
+			efx->ptp_data->ts_corrections.general_rx = MCDI_DWORD(
+				outbuf,
+				PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX);
+		} else {
+			efx->ptp_data->ts_corrections.general_tx =
+				efx->ptp_data->ts_corrections.ptp_tx;
+			efx->ptp_data->ts_corrections.general_rx =
+				efx->ptp_data->ts_corrections.ptp_rx;
+		}
+	} else if (rc == -EINVAL) {
+		efx->ptp_data->ts_corrections.ptp_tx = 0;
+		efx->ptp_data->ts_corrections.ptp_rx = 0;
+		efx->ptp_data->ts_corrections.pps_out = 0;
+		efx->ptp_data->ts_corrections.pps_in = 0;
+		efx->ptp_data->ts_corrections.general_tx = 0;
+		efx->ptp_data->ts_corrections.general_rx = 0;
+	} else {
+		efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf), outbuf,
+				       sizeof(outbuf), rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Enable MCDI PTP support. */
+static int efx_ptp_enable(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
+		       efx->ptp_data->channel ?
+		       efx->ptp_data->channel->channel : 0);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
+
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_ENABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
+}
+
+/* Disable MCDI PTP support.
+ *
+ * Note that this function should never rely on the presence of ptp_data -
+ * may be called before that exists.
+ */
+static int efx_ptp_disable(struct efx_nic *efx)
+{
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
+	MCDI_DECLARE_BUF_ERR(outbuf);
+	int rc;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	rc = efx_mcdi_rpc_quiet(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+				outbuf, sizeof(outbuf), NULL);
+	rc = (rc == -EALREADY) ? 0 : rc;
+	/* If we get ENOSYS, the NIC doesn't support PTP, and thus this function
+	 * should only have been called during probe.
+	 */
+	if (rc == -ENOSYS || rc == -EPERM)
+		pci_info(efx->pci_dev, "no PTP support\n");
+	else if (rc)
+		efx_mcdi_display_error(efx, MC_CMD_PTP,
+				       MC_CMD_PTP_IN_DISABLE_LEN,
+				       outbuf, sizeof(outbuf), rc);
+	return rc;
+}
+
+static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(q))) {
+		local_bh_disable();
+		netif_receive_skb(skb);
+		local_bh_enable();
+	}
+}
+
+static void efx_ptp_handle_no_channel(struct efx_nic *efx)
+{
+	netif_err(efx, drv, efx->net_dev,
+		  "ERROR: PTP requires MSI-X and 1 additional interrupt "
+		  "vector. PTP disabled\n");
+}
+
+/* Repeatedly send the host time to the MC which will capture the hardware
+ * time.
+ */
+static void efx_ptp_send_times(struct efx_nic *efx,
+			       struct pps_event_time *last_time)
+{
+	struct pps_event_time now;
+	struct timespec64 limit;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int *mc_running = ptp->start.addr;
+
+	pps_get_ts(&now);
+	limit = now.ts_real;
+	timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+
+	/* Write host time for specified period or until MC is done */
+	while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
+	       READ_ONCE(*mc_running)) {
+		struct timespec64 update_time;
+		unsigned int host_time;
+
+		/* Don't update continuously to avoid saturating the PCIe bus */
+		update_time = now.ts_real;
+		timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+		do {
+			pps_get_ts(&now);
+		} while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
+			 READ_ONCE(*mc_running));
+
+		/* Synchronise NIC with single word of time only */
+		host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
+			     now.ts_real.tv_nsec);
+		/* Update host time in NIC memory */
+		efx->type->ptp_write_host_time(efx, host_time);
+	}
+	*last_time = now;
+}
+
+/* Read a timeset from the MC's results and partial process. */
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+				 struct efx_ptp_timeset *timeset)
+{
+	unsigned int start_ns, end_ns;
+
+	timeset->host_start = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTSTART);
+	timeset->major = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MAJOR);
+	timeset->minor = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_MINOR);
+	timeset->host_end = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_HOSTEND);
+	timeset->wait = MCDI_DWORD(data, PTP_OUT_SYNCHRONIZE_WAITNS);
+
+	/* Ignore seconds */
+	start_ns = timeset->host_start & MC_NANOSECOND_MASK;
+	end_ns = timeset->host_end & MC_NANOSECOND_MASK;
+	/* Allow for rollover */
+	if (end_ns < start_ns)
+		end_ns += NSEC_PER_SEC;
+	/* Determine duration of operation */
+	timeset->window = end_ns - start_ns;
+}
+
+/* Process times received from MC.
+ *
+ * Extract times from returned results, and establish the minimum value
+ * seen.  The minimum value represents the "best" possible time and readings
+ * much greater than this are rejected - the machine is, perhaps, too
+ * busy. A number of readings are taken so that, hopefully, at least one good
+ * synchronisation will be seen in the results.
+ */
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+		      size_t response_length,
+		      const struct pps_event_time *last_time)
+{
+	unsigned int number_readings =
+		MCDI_VAR_ARRAY_LEN(response_length,
+				   PTP_OUT_SYNCHRONIZE_TIMESET);
+	unsigned int i;
+	unsigned int ngood = 0;
+	unsigned int last_good = 0;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	u32 last_sec;
+	u32 start_sec;
+	struct timespec64 delta;
+	ktime_t mc_time;
+
+	if (number_readings == 0)
+		return -EAGAIN;
+
+	/* Read the set of results and find the last good host-MC
+	 * synchronization result. The MC times when it finishes reading the
+	 * host time so the corrected window time should be fairly constant
+	 * for a given platform. Increment stats for any results that appear
+	 * to be erroneous.
+	 */
+	for (i = 0; i < number_readings; i++) {
+		s32 window, corrected;
+		struct timespec64 wait;
+
+		efx_ptp_read_timeset(
+			MCDI_ARRAY_STRUCT_PTR(synch_buf,
+					      PTP_OUT_SYNCHRONIZE_TIMESET, i),
+			&ptp->timeset[i]);
+
+		wait = ktime_to_timespec64(
+			ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
+		window = ptp->timeset[i].window;
+		corrected = window - wait.tv_nsec;
+
+		/* We expect the uncorrected synchronization window to be at
+		 * least as large as the interval between host start and end
+		 * times. If it is smaller than this then it is most likely
+		 * to be a consequence of the host's time being adjusted.
+		 * Check that the corrected sync window is in a reasonable
+		 * range. If it is out of range it is likely to be because an
+		 * interrupt or other delay occurred between reading the system
+		 * time and writing it to MC memory.
+		 */
+		if (window < SYNCHRONISATION_GRANULARITY_NS) {
+			++ptp->invalid_sync_windows;
+		} else if (corrected >= MAX_SYNCHRONISATION_NS) {
+			++ptp->oversize_sync_windows;
+		} else if (corrected < ptp->min_synchronisation_ns) {
+			++ptp->undersize_sync_windows;
+		} else {
+			ngood++;
+			last_good = i;
+		}
+	}
+
+	if (ngood == 0) {
+		netif_warn(efx, drv, efx->net_dev,
+			   "PTP no suitable synchronisations\n");
+		return -EAGAIN;
+	}
+
+	/* Calculate delay from last good sync (host time) to last_time.
+	 * It is possible that the seconds rolled over between taking
+	 * the start reading and the last value written by the host.  The
+	 * timescales are such that a gap of more than one second is never
+	 * expected.  delta is *not* normalised.
+	 */
+	start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
+	last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
+	if (start_sec != last_sec &&
+	    ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+		netif_warn(efx, hw, efx->net_dev,
+			   "PTP bad synchronisation seconds\n");
+		return -EAGAIN;
+	}
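+	/* The check above guarantees last_sec is either start_sec or
+	 * start_sec + 1 modulo 4 (MC_SECOND_MASK + 1), so the low bit of the
+	 * difference is the whole-second part of the delay.
+	 */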
+	delta.tv_sec = (last_sec - start_sec) & 1;
+	delta.tv_nsec =
+		last_time->ts_real.tv_nsec -
+		(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+	/* Convert the NIC time at last good sync into kernel time.
+	 * No correction is required - this time is the output of a
+	 * firmware process.
+	 */
+	mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+					  ptp->timeset[last_good].minor, 0);
+
+	/* Calculate delay from NIC top of second to last_time */
+	delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
+
+	/* Set PPS timestamp to match NIC top of second */
+	ptp->host_time_pps = *last_time;
+	pps_sub_ts(&ptp->host_time_pps, delta);
+
+	return 0;
+}
+
+/* Synchronize times between the host and the MC */
+static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
+	size_t response_length;
+	int rc;
+	unsigned long timeout;
+	struct pps_event_time last_time = {};
+	unsigned int loops = 0;
+	int *start = ptp->start.addr;
+
+	MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+	MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
+		       num_readings);
+	MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+		       ptp->start.dma_addr);
+
+	/* Clear flag that signals MC ready */
+	WRITE_ONCE(*start, 0);
+	rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+				MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+	EFX_WARN_ON_ONCE_PARANOID(rc);
+
+	/* Wait for start from MCDI (or timeout) */
+	timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
+	while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) {
+		udelay(20);	/* MCDI execution usually starts quickly */
+		loops++;
+	}
+
+	if (loops <= 1)
+		++ptp->fast_syncs;
+	if (!time_before(jiffies, timeout))
+		++ptp->sync_timeouts;
+
+	if (READ_ONCE(*start))
+		efx_ptp_send_times(efx, &last_time);
+
+	/* Collect results */
+	rc = efx_mcdi_rpc_finish(efx, MC_CMD_PTP,
+				 MC_CMD_PTP_IN_SYNCHRONIZE_LEN,
+				 synch_buf, sizeof(synch_buf),
+				 &response_length);
+	if (rc == 0) {
+		rc = efx_ptp_process_times(efx, synch_buf, response_length,
+					   &last_time);
+		if (rc == 0)
+			++ptp->good_syncs;
+		else
+			++ptp->no_time_syncs;
+	}
+
+	/* Increment the bad syncs counter if the synchronize fails, whatever
+	 * the reason.
+	 */
+	if (rc != 0)
+		++ptp->bad_syncs;
+
+	return rc;
+}
+
+/* Transmit a PTP packet via the dedicated hardware timestamped queue. */
+static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	u8 type = efx_tx_csum_type_skb(skb);
+	struct efx_tx_queue *tx_queue;
+
+	tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
+	if (tx_queue && tx_queue->timestamping) {
+		efx_enqueue_skb(tx_queue, skb);
+	} else {
+		WARN_ONCE(1, "PTP channel has no timestamped tx queue\n");
+		dev_kfree_skb_any(skb);
+	}
+}
+
+/* Transmit a PTP packet, via the MCDI interface, to the wire. */
+static void efx_ptp_xmit_skb_mc(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp_data = efx->ptp_data;
+	struct skb_shared_hwtstamps timestamps;
+	int rc = -EIO;
+	MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+	size_t len;
+
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+	if (skb_shinfo(skb)->nr_frags != 0) {
+		rc = skb_linearize(skb);
+		if (rc != 0)
+			goto fail;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		rc = skb_checksum_help(skb);
+		if (rc != 0)
+			goto fail;
+	}
+	skb_copy_from_linear_data(skb,
+				  MCDI_PTR(ptp_data->txbuf,
+					   PTP_IN_TRANSMIT_PACKET),
+				  skb->len);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+			  ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+			  txtime, sizeof(txtime), &len);
+	if (rc != 0)
+		goto fail;
+
+	memset(&timestamps, 0, sizeof(timestamps));
+	timestamps.hwtstamp = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MAJOR),
+		MCDI_DWORD(txtime, PTP_OUT_TRANSMIT_MINOR),
+		ptp_data->ts_corrections.ptp_tx);
+
+	skb_tstamp_tx(skb, &timestamps);
+
+	rc = 0;
+
+fail:
+	dev_kfree_skb_any(skb);
+
+	return;
+}
+
+static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct list_head *cursor;
+	struct list_head *next;
+
+	if (ptp->rx_ts_inline)
+		return;
+
+	/* Drop time-expired events */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx,
+				 link);
+		if (time_after(jiffies, evt->expiry)) {
+			list_move(&evt->link, &ptp->evt_free_list);
+			netif_warn(efx, hw, efx->net_dev,
+				   "PTP rx event dropped\n");
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+}
+
+static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
+					      struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	bool evts_waiting;
+	struct list_head *cursor;
+	struct list_head *next;
+	struct efx_ptp_match *match;
+	enum ptp_packet_state rc = PTP_PACKET_STATE_UNMATCHED;
+
+	WARN_ON_ONCE(ptp->rx_ts_inline);
+
+	spin_lock_bh(&ptp->evt_lock);
+	evts_waiting = !list_empty(&ptp->evt_list);
+	spin_unlock_bh(&ptp->evt_lock);
+
+	if (!evts_waiting)
+		return PTP_PACKET_STATE_UNMATCHED;
+
+	match = (struct efx_ptp_match *)skb->cb;
+	/* Look for a matching timestamp in the event queue */
+	spin_lock_bh(&ptp->evt_lock);
+	list_for_each_safe(cursor, next, &ptp->evt_list) {
+		struct efx_ptp_event_rx *evt;
+
+		evt = list_entry(cursor, struct efx_ptp_event_rx, link);
+		if ((evt->seq0 == match->words[0]) &&
+		    (evt->seq1 == match->words[1])) {
+			struct skb_shared_hwtstamps *timestamps;
+
+			/* Match - add in hardware timestamp */
+			timestamps = skb_hwtstamps(skb);
+			timestamps->hwtstamp = evt->hwtimestamp;
+
+			match->state = PTP_PACKET_STATE_MATCHED;
+			rc = PTP_PACKET_STATE_MATCHED;
+			list_move(&evt->link, &ptp->evt_free_list);
+			break;
+		}
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+
+	return rc;
+}
+
+/* Process any queued receive events and corresponding packets
+ *
+ * q is returned with all the packets that are ready for delivery.
+ */
+static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&ptp->rxq))) {
+		struct efx_ptp_match *match;
+
+		match = (struct efx_ptp_match *)skb->cb;
+		if (match->state == PTP_PACKET_STATE_MATCH_UNWANTED) {
+			__skb_queue_tail(q, skb);
+		} else if (efx_ptp_match_rx(efx, skb) ==
+			   PTP_PACKET_STATE_MATCHED) {
+			__skb_queue_tail(q, skb);
+		} else if (time_after(jiffies, match->expiry)) {
+			match->state = PTP_PACKET_STATE_TIMED_OUT;
+			++ptp->rx_no_timestamp;
+			__skb_queue_tail(q, skb);
+		} else {
+			/* Replace unprocessed entry and stop */
+			skb_queue_head(&ptp->rxq, skb);
+			break;
+		}
+	}
+}
+
+/* Complete processing of a received packet */
+static inline void efx_ptp_process_rx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	local_bh_disable();
+	netif_receive_skb(skb);
+	local_bh_enable();
+}
+
+static void efx_ptp_remove_multicast_filters(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	if (ptp->rxfilter_installed) {
+		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+					  ptp->rxfilter_general);
+		efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+					  ptp->rxfilter_event);
+		ptp->rxfilter_installed = false;
+	}
+}
+
+static int efx_ptp_insert_multicast_filters(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_filter_spec rxfilter;
+	int rc;
+
+	if (!ptp->channel || ptp->rxfilter_installed)
+		return 0;
+
+	/* Must filter on both event and general ports to ensure
+	 * that there is no packet re-ordering.
+	 */
+	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+			   efx_rx_queue_index(
+				   efx_channel_get_rx_queue(ptp->channel)));
+	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+				       htonl(PTP_ADDRESS),
+				       htons(PTP_EVENT_PORT));
+	if (rc != 0)
+		return rc;
+
+	rc = efx_filter_insert_filter(efx, &rxfilter, true);
+	if (rc < 0)
+		return rc;
+	ptp->rxfilter_event = rc;
+
+	efx_filter_init_rx(&rxfilter, EFX_FILTER_PRI_REQUIRED, 0,
+			   efx_rx_queue_index(
+				   efx_channel_get_rx_queue(ptp->channel)));
+	rc = efx_filter_set_ipv4_local(&rxfilter, IPPROTO_UDP,
+				       htonl(PTP_ADDRESS),
+				       htons(PTP_GENERAL_PORT));
+	if (rc != 0)
+		goto fail;
+
+	rc = efx_filter_insert_filter(efx, &rxfilter, true);
+	if (rc < 0)
+		goto fail;
+	ptp->rxfilter_general = rc;
+
+	ptp->rxfilter_installed = true;
+	return 0;
+
+fail:
+	efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
+				  ptp->rxfilter_event);
+	return rc;
+}
+
+static int efx_ptp_start(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int rc;
+
+	ptp->reset_required = false;
+
+	rc = efx_ptp_insert_multicast_filters(efx);
+	if (rc)
+		return rc;
+
+	rc = efx_ptp_enable(efx);
+	if (rc != 0)
+		goto fail;
+
+	ptp->evt_frag_idx = 0;
+	ptp->current_adjfreq = 0;
+
+	return 0;
+
+fail:
+	efx_ptp_remove_multicast_filters(efx);
+	return rc;
+}
+
+static int efx_ptp_stop(struct efx_nic *efx)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct list_head *cursor;
+	struct list_head *next;
+	int rc;
+
+	if (ptp == NULL)
+		return 0;
+
+	rc = efx_ptp_disable(efx);
+
+	efx_ptp_remove_multicast_filters(efx);
+
+	/* Make sure RX packets are really delivered */
+	efx_ptp_deliver_rx_queue(&efx->ptp_data->rxq);
+	skb_queue_purge(&efx->ptp_data->txq);
+
+	/* Drop any pending receive events */
+	spin_lock_bh(&efx->ptp_data->evt_lock);
+	list_for_each_safe(cursor, next, &efx->ptp_data->evt_list)
+		list_move(cursor, &efx->ptp_data->evt_free_list);
+	spin_unlock_bh(&efx->ptp_data->evt_lock);
+
+	return rc;
+}
+
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+	if (efx->ptp_data && efx->ptp_data->enabled)
+		return efx_ptp_start(efx);
+	return 0;
+}
+
+static void efx_ptp_pps_worker(struct work_struct *work)
+{
+	struct efx_ptp_data *ptp =
+		container_of(work, struct efx_ptp_data, pps_work);
+	struct efx_nic *efx = ptp->efx;
+	struct ptp_clock_event ptp_evt;
+
+	if (efx_ptp_synchronize(efx, PTP_SYNC_ATTEMPTS))
+		return;
+
+	ptp_evt.type = PTP_CLOCK_PPSUSR;
+	ptp_evt.pps_times = ptp->host_time_pps;
+	ptp_clock_event(ptp->phc_clock, &ptp_evt);
+}
+
+static void efx_ptp_worker(struct work_struct *work)
+{
+	struct efx_ptp_data *ptp_data =
+		container_of(work, struct efx_ptp_data, work);
+	struct efx_nic *efx = ptp_data->efx;
+	struct sk_buff *skb;
+	struct sk_buff_head tempq;
+
+	if (ptp_data->reset_required) {
+		efx_ptp_stop(efx);
+		efx_ptp_start(efx);
+		return;
+	}
+
+	efx_ptp_drop_time_expired_events(efx);
+
+	__skb_queue_head_init(&tempq);
+	efx_ptp_process_events(efx, &tempq);
+
+	while ((skb = skb_dequeue(&ptp_data->txq)))
+		ptp_data->xmit_skb(efx, skb);
+
+	while ((skb = __skb_dequeue(&tempq)))
+		efx_ptp_process_rx(efx, skb);
+}
+
+static const struct ptp_clock_info efx_phc_clock_info = {
+	.owner		= THIS_MODULE,
+	.name		= "sfc",
+	.max_adj	= MAX_PPB,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.n_pins		= 0,
+	.pps		= 1,
+	.adjfreq	= efx_phc_adjfreq,
+	.adjtime	= efx_phc_adjtime,
+	.gettime64	= efx_phc_gettime,
+	.settime64	= efx_phc_settime,
+	.enable		= efx_phc_enable,
+};
+
+/* Initialise PTP state. */
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
+{
+	struct efx_ptp_data *ptp;
+	int rc = 0;
+	unsigned int pos;
+
+	ptp = kzalloc(sizeof(struct efx_ptp_data), GFP_KERNEL);
+	efx->ptp_data = ptp;
+	if (!efx->ptp_data)
+		return -ENOMEM;
+
+	ptp->efx = efx;
+	ptp->channel = channel;
+	ptp->rx_ts_inline = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+
+	rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
+	if (rc != 0)
+		goto fail1;
+
+	skb_queue_head_init(&ptp->rxq);
+	skb_queue_head_init(&ptp->txq);
+	ptp->workwq = create_singlethread_workqueue("sfc_ptp");
+	if (!ptp->workwq) {
+		rc = -ENOMEM;
+		goto fail2;
+	}
+
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		ptp->xmit_skb = efx_ptp_xmit_skb_queue;
+		/* Request sync events on this channel. */
+		channel->sync_events_state = SYNC_EVENTS_QUIESCENT;
+	} else {
+		ptp->xmit_skb = efx_ptp_xmit_skb_mc;
+	}
+
+	INIT_WORK(&ptp->work, efx_ptp_worker);
+	ptp->config.flags = 0;
+	ptp->config.tx_type = HWTSTAMP_TX_OFF;
+	ptp->config.rx_filter = HWTSTAMP_FILTER_NONE;
+	INIT_LIST_HEAD(&ptp->evt_list);
+	INIT_LIST_HEAD(&ptp->evt_free_list);
+	spin_lock_init(&ptp->evt_lock);
+	for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
+		list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+
+	/* Get the NIC PTP attributes and set up time conversions */
+	rc = efx_ptp_get_attributes(efx);
+	if (rc < 0)
+		goto fail3;
+
+	/* Get the timestamp corrections */
+	rc = efx_ptp_get_timestamp_corrections(efx);
+	if (rc < 0)
+		goto fail3;
+
+	if (efx->mcdi->fn_flags &
+	    (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY)) {
+		ptp->phc_clock_info = efx_phc_clock_info;
+		ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info,
+						    &efx->pci_dev->dev);
+		if (IS_ERR(ptp->phc_clock)) {
+			rc = PTR_ERR(ptp->phc_clock);
+			goto fail3;
+		} else if (ptp->phc_clock) {
+			INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker);
+			ptp->pps_workwq = create_singlethread_workqueue("sfc_pps");
+			if (!ptp->pps_workwq) {
+				rc = -ENOMEM;
+				goto fail4;
+			}
+		}
+	}
+	ptp->nic_ts_enabled = false;
+
+	return 0;
+fail4:
+	ptp_clock_unregister(efx->ptp_data->phc_clock);
+
+fail3:
+	destroy_workqueue(efx->ptp_data->workwq);
+
+fail2:
+	efx_nic_free_buffer(efx, &ptp->start);
+
+fail1:
+	kfree(efx->ptp_data);
+	efx->ptp_data = NULL;
+
+	return rc;
+}
+
+/* Initialise PTP channel.
+ *
+ * Setting core_index to zero causes the queue to be initialised and doesn't
+ * overlap with 'rxq0' because ptp.c doesn't use skb_record_rx_queue.
+ */
+static int efx_ptp_probe_channel(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	int rc;
+
+	channel->irq_moderation_us = 0;
+	channel->rx_queue.core_index = 0;
+
+	rc = efx_ptp_probe(efx, channel);
+	/* Failure to probe PTP is not fatal; this channel will just not be
+	 * used for anything.
+	 * In the case of EPERM, efx_ptp_probe will print its own message (in
+	 * efx_ptp_get_attributes()), so we don't need to.
+	 */
+	if (rc && rc != -EPERM)
+		netif_warn(efx, drv, efx->net_dev,
+			   "Failed to probe PTP, rc=%d\n", rc);
+	return 0;
+}
+
+void efx_ptp_remove(struct efx_nic *efx)
+{
+	if (!efx->ptp_data)
+		return;
+
+	(void)efx_ptp_disable(efx);
+
+	cancel_work_sync(&efx->ptp_data->work);
+	if (efx->ptp_data->pps_workwq)
+		cancel_work_sync(&efx->ptp_data->pps_work);
+
+	skb_queue_purge(&efx->ptp_data->rxq);
+	skb_queue_purge(&efx->ptp_data->txq);
+
+	if (efx->ptp_data->phc_clock) {
+		destroy_workqueue(efx->ptp_data->pps_workwq);
+		ptp_clock_unregister(efx->ptp_data->phc_clock);
+	}
+
+	destroy_workqueue(efx->ptp_data->workwq);
+
+	efx_nic_free_buffer(efx, &efx->ptp_data->start);
+	kfree(efx->ptp_data);
+	efx->ptp_data = NULL;
+}
+
+static void efx_ptp_remove_channel(struct efx_channel *channel)
+{
+	efx_ptp_remove(channel->efx);
+}
+
+static void efx_ptp_get_channel_name(struct efx_channel *channel,
+				     char *buf, size_t len)
+{
+	snprintf(buf, len, "%s-ptp", channel->efx->name);
+}
+
+/* Determine whether this packet should be processed by the PTP module
+ * or transmitted conventionally.
+ */
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	return efx->ptp_data &&
+		efx->ptp_data->enabled &&
+		skb->len >= PTP_MIN_LENGTH &&
+		skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
+		likely(skb->protocol == htons(ETH_P_IP)) &&
+		skb_transport_header_was_set(skb) &&
+		skb_network_header_len(skb) >= sizeof(struct iphdr) &&
+		ip_hdr(skb)->protocol == IPPROTO_UDP &&
+		skb_headlen(skb) >=
+		skb_transport_offset(skb) + sizeof(struct udphdr) &&
+		udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
+}
+
+/* Receive a PTP packet.  Packets are queued until the arrival of
+ * the receive timestamp from the MC - this will probably occur after the
+ * packet arrival because of the processing in the MC.
+ */
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
+	u8 *match_data_012, *match_data_345;
+	unsigned int version;
+	u8 *data;
+
+	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+
+	/* Correct version? */
+	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
+		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH))
+			return false;
+		data = skb->data;
+		version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
+		if (version != PTP_VERSION_V1)
+			return false;
+
+		/* PTP V1 uses all six bytes of the UUID to match the packet
+		 * to the timestamp
+		 */
+		match_data_012 = data + PTP_V1_UUID_OFFSET;
+		match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
+	} else {
+		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH))
+			return false;
+		data = skb->data;
+		version = data[PTP_V2_VERSION_OFFSET];
+		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2)
+			return false;
+
+		/* The original V2 implementation uses bytes 2-7 of
+		 * the UUID to match the packet to the timestamp. This
+		 * discards two of the bytes of the MAC address used
+		 * to create the UUID (SF bug 33070).  The PTP V2
+		 * enhanced mode fixes this issue and uses bytes 0-2
+		 * and byte 5-7 of the UUID.
+		 */
+		match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
+		if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+			match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
+		} else {
+			match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
+			BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
+		}
+	}
+
+	/* Does this packet require timestamping? */
+	if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+		match->state = PTP_PACKET_STATE_UNMATCHED;
+
+		/* We expect the sequence number to be in the same position in
+		 * the packet for PTP V1 and V2
+		 */
+		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
+		/* Extract UUID/Sequence information */
+		match->words[0] = (match_data_012[0]         |
+				   (match_data_012[1] << 8)  |
+				   (match_data_012[2] << 16) |
+				   (match_data_345[0] << 24));
+		match->words[1] = (match_data_345[1]         |
+				   (match_data_345[2] << 8)  |
+				   (data[PTP_V1_SEQUENCE_OFFSET +
+					 PTP_V1_SEQUENCE_LENGTH - 1] <<
+				    16));
+	} else {
+		match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
+	}
+
+	skb_queue_tail(&ptp->rxq, skb);
+	queue_work(ptp->workwq, &ptp->work);
+
+	return true;
+}
+
+/* Transmit a PTP packet.  This has to be transmitted by the MC
+ * itself, through an MCDI call.  MCDI calls aren't permitted
+ * in the transmit path so defer the actual transmission to a suitable worker.
+ */
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	skb_queue_tail(&ptp->txq, skb);
+
+	if ((udp_hdr(skb)->dest == htons(PTP_EVENT_PORT)) &&
+	    (skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM))
+		efx_xmit_hwtstamp_pending(skb);
+	queue_work(ptp->workwq, &ptp->work);
+
+	return NETDEV_TX_OK;
+}
+
+int efx_ptp_get_mode(struct efx_nic *efx)
+{
+	return efx->ptp_data->mode;
+}
+
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			unsigned int new_mode)
+{
+	if ((enable_wanted != efx->ptp_data->enabled) ||
+	    (enable_wanted && (efx->ptp_data->mode != new_mode))) {
+		int rc = 0;
+
+		if (enable_wanted) {
+			/* Change of mode requires disable */
+			if (efx->ptp_data->enabled &&
+			    (efx->ptp_data->mode != new_mode)) {
+				efx->ptp_data->enabled = false;
+				rc = efx_ptp_stop(efx);
+				if (rc != 0)
+					return rc;
+			}
+
+			/* Set new operating mode and establish
+			 * baseline synchronisation, which must
+			 * succeed.
+			 */
+			efx->ptp_data->mode = new_mode;
+			if (netif_running(efx->net_dev))
+				rc = efx_ptp_start(efx);
+			if (rc == 0) {
+				rc = efx_ptp_synchronize(efx,
+							 PTP_SYNC_ATTEMPTS * 2);
+				if (rc != 0)
+					efx_ptp_stop(efx);
+			}
+		} else {
+			rc = efx_ptp_stop(efx);
+		}
+
+		if (rc != 0)
+			return rc;
+
+		efx->ptp_data->enabled = enable_wanted;
+	}
+
+	return 0;
+}
+
+static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
+{
+	int rc;
+
+	if ((init->tx_type != HWTSTAMP_TX_OFF) &&
+	    (init->tx_type != HWTSTAMP_TX_ON))
+		return -ERANGE;
+
+	rc = efx->type->ptp_set_ts_config(efx, init);
+	if (rc)
+		return rc;
+
+	efx->ptp_data->config = *init;
+	return 0;
+}
+
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	struct efx_nic *primary = efx->primary;
+
+	ASSERT_RTNL();
+
+	if (!ptp)
+		return;
+
+	ts_info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE |
+				     SOF_TIMESTAMPING_RX_HARDWARE |
+				     SOF_TIMESTAMPING_RAW_HARDWARE);
+	/* Check licensed features.  If we don't have the license for TX
+	 * timestamps, the NIC will not support them.
+	 */
+	if (efx_ptp_use_mac_tx_timestamps(efx)) {
+		struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+		if (!(nic_data->licensed_features &
+		      (1 << LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN)))
+			ts_info->so_timestamping &=
+				~SOF_TIMESTAMPING_TX_HARDWARE;
+	}
+	if (primary && primary->ptp_data && primary->ptp_data->phc_clock)
+		ts_info->phc_index =
+			ptp_clock_index(primary->ptp_data->phc_clock);
+	ts_info->tx_types = 1 << HWTSTAMP_TX_OFF | 1 << HWTSTAMP_TX_ON;
+	ts_info->rx_filters = ptp->efx->type->hwtstamp_filters;
+}
+
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	struct hwtstamp_config config;
+	int rc;
+
+	/* Not a PTP enabled port */
+	if (!efx->ptp_data)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	rc = efx_ptp_ts_init(efx, &config);
+	if (rc != 0)
+		return rc;
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config))
+		? -EFAULT : 0;
+}
+
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr)
+{
+	if (!efx->ptp_data)
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, &efx->ptp_data->config,
+			    sizeof(efx->ptp_data->config)) ? -EFAULT : 0;
+}
+
+static void ptp_event_failure(struct efx_nic *efx, int expected_frag_len)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	netif_err(efx, hw, efx->net_dev,
+		"PTP unexpected event length: got %d expected %d\n",
+		ptp->evt_frag_idx, expected_frag_len);
+	ptp->reset_required = true;
+	queue_work(ptp->workwq, &ptp->work);
+}
+
+/* Process a completed receive event.  Put it on the event queue and
+ * start the worker thread.  This is required because events and their
+ * corresponding packets may arrive in either order.
+ */
+static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	struct efx_ptp_event_rx *evt = NULL;
+
+	if (WARN_ON_ONCE(ptp->rx_ts_inline))
+		return;
+
+	if (ptp->evt_frag_idx != 3) {
+		ptp_event_failure(efx, 3);
+		return;
+	}
+
+	spin_lock_bh(&ptp->evt_lock);
+	if (!list_empty(&ptp->evt_free_list)) {
+		evt = list_first_entry(&ptp->evt_free_list,
+				       struct efx_ptp_event_rx, link);
+		list_del(&evt->link);
+
+		evt->seq0 = EFX_QWORD_FIELD(ptp->evt_frags[2], MCDI_EVENT_DATA);
+		evt->seq1 = (EFX_QWORD_FIELD(ptp->evt_frags[2],
+					     MCDI_EVENT_SRC)        |
+			     (EFX_QWORD_FIELD(ptp->evt_frags[1],
+					      MCDI_EVENT_SRC) << 8) |
+			     (EFX_QWORD_FIELD(ptp->evt_frags[0],
+					      MCDI_EVENT_SRC) << 16));
+		evt->hwtimestamp = efx->ptp_data->nic_to_kernel_time(
+			EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA),
+			EFX_QWORD_FIELD(ptp->evt_frags[1], MCDI_EVENT_DATA),
+			ptp->ts_corrections.ptp_rx);
+		evt->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
+		list_add_tail(&evt->link, &ptp->evt_list);
+
+		queue_work(ptp->workwq, &ptp->work);
+	} else if (net_ratelimit()) {
+		/* Log a rate-limited warning message. */
+		netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+	}
+	spin_unlock_bh(&ptp->evt_lock);
+}
+
+static void ptp_event_fault(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	int code = EFX_QWORD_FIELD(ptp->evt_frags[0], MCDI_EVENT_DATA);
+
+	if (ptp->evt_frag_idx != 1) {
+		ptp_event_failure(efx, 1);
+		return;
+	}
+
+	netif_err(efx, hw, efx->net_dev, "PTP error %d\n", code);
+}
+
+static void ptp_event_pps(struct efx_nic *efx, struct efx_ptp_data *ptp)
+{
+	if (ptp->nic_ts_enabled)
+		queue_work(ptp->pps_workwq, &ptp->pps_work);
+}
+
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
+{
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+
+	if (!ptp) {
+		if (!efx->ptp_warned) {
+			netif_warn(efx, drv, efx->net_dev,
+				   "Received PTP event but PTP not set up\n");
+			efx->ptp_warned = true;
+		}
+		return;
+	}
+
+	if (!ptp->enabled)
+		return;
+
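+	/* A PTP MCDI event may span several event queue entries; fragments
+	 * are collected in evt_frags[] until one arrives with the CONT
+	 * field clear, at which point the complete event is processed.
+	 */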
+	if (ptp->evt_frag_idx == 0) {
+		ptp->evt_code = code;
+	} else if (ptp->evt_code != code) {
+		netif_err(efx, hw, efx->net_dev,
+			  "PTP out of sequence event %d\n", code);
+		ptp->evt_frag_idx = 0;
+	}
+
+	ptp->evt_frags[ptp->evt_frag_idx++] = *ev;
+	if (!MCDI_EVENT_FIELD(*ev, CONT)) {
+		/* Process resulting event */
+		switch (code) {
+		case MCDI_EVENT_CODE_PTP_RX:
+			ptp_event_rx(efx, ptp);
+			break;
+		case MCDI_EVENT_CODE_PTP_FAULT:
+			ptp_event_fault(efx, ptp);
+			break;
+		case MCDI_EVENT_CODE_PTP_PPS:
+			ptp_event_pps(efx, ptp);
+			break;
+		default:
+			netif_err(efx, hw, efx->net_dev,
+				  "PTP unknown event %d\n", code);
+			break;
+		}
+		ptp->evt_frag_idx = 0;
+	} else if (MAX_EVENT_FRAGS == ptp->evt_frag_idx) {
+		netif_err(efx, hw, efx->net_dev,
+			  "PTP too many event fragments\n");
+		ptp->evt_frag_idx = 0;
+	}
+}
+
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+
+	/* When extracting the sync timestamp minor value, we should discard
+	 * the least significant two bits. These are not required in order
+	 * to reconstruct full-range timestamps and they are optionally used
+	 * to report status depending on the options supplied when subscribing
+	 * for sync events.
+	 */
+	channel->sync_timestamp_major = MCDI_EVENT_FIELD(*ev, PTP_TIME_MAJOR);
+	channel->sync_timestamp_minor =
+		(MCDI_EVENT_FIELD(*ev, PTP_TIME_MINOR_MS_8BITS) & 0xFC)
+			<< ptp->nic_time.sync_event_minor_shift;
+
+	/* if sync events have been disabled then we want to silently ignore
+	 * this event, so throw away result.
+	 */
+	(void) cmpxchg(&channel->sync_events_state, SYNC_EVENTS_REQUESTED,
+		       SYNC_EVENTS_VALID);
+}
+
+static inline u32 efx_rx_buf_timestamp_minor(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_ts_offset));
+#else
+	const u8 *data = eh + efx->rx_packet_ts_offset;
+	return (u32)data[0]       |
+	       (u32)data[1] << 8  |
+	       (u32)data[2] << 16 |
+	       (u32)data[3] << 24;
+#endif
+}
+
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+				   struct sk_buff *skb)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_ptp_data *ptp = efx->ptp_data;
+	u32 pkt_timestamp_major, pkt_timestamp_minor;
+	u32 diff, carry;
+	struct skb_shared_hwtstamps *timestamps;
+
+	if (channel->sync_events_state != SYNC_EVENTS_VALID)
+		return;
+
+	pkt_timestamp_minor = efx_rx_buf_timestamp_minor(efx, skb_mac_header(skb));
+
+	/* get the difference between the packet and sync timestamps,
+	 * modulo one second
+	 */
+	diff = pkt_timestamp_minor - channel->sync_timestamp_minor;
+	if (pkt_timestamp_minor < channel->sync_timestamp_minor)
+		diff += ptp->nic_time.minor_max;
+
+	/* do we roll over a second boundary and need to carry the one? */
+	carry = (channel->sync_timestamp_minor >= ptp->nic_time.minor_max - diff) ?
+		1 : 0;
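+	/* carry is 1 when sync_timestamp_minor + diff would wrap past
+	 * minor_max, i.e. the packet was received in the second after the
+	 * one in which the sync event was taken.
+	 */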
+
+	if (diff <= ptp->nic_time.sync_event_diff_max) {
+		/* packet is ahead of the sync event by a quarter of a second or
+		 * less (allowing for fuzz)
+		 */
+		pkt_timestamp_major = channel->sync_timestamp_major + carry;
+	} else if (diff >= ptp->nic_time.sync_event_diff_min) {
+		/* packet is behind the sync event but within the fuzz factor.
+		 * This means the RX packet and sync event crossed as they were
+		 * placed on the event queue, which can sometimes happen.
+		 */
+		pkt_timestamp_major = channel->sync_timestamp_major - 1 + carry;
+	} else {
+		/* it's outside tolerance in both directions. this might be
+		 * indicative of us missing sync events for some reason, so
+		 * we'll call it an error rather than risk giving a bogus
+		 * timestamp.
+		 */
+		netif_vdbg(efx, drv, efx->net_dev,
+			  "packet timestamp %x too far from sync event %x:%x\n",
+			  pkt_timestamp_minor, channel->sync_timestamp_major,
+			  channel->sync_timestamp_minor);
+		return;
+	}
+
+	/* attach the timestamps to the skb */
+	timestamps = skb_hwtstamps(skb);
+	timestamps->hwtstamp =
+		ptp->nic_to_kernel_time(pkt_timestamp_major,
+					pkt_timestamp_minor,
+					ptp->ts_corrections.general_rx);
+}
+
+static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+	struct efx_ptp_data *ptp_data = container_of(ptp,
+						     struct efx_ptp_data,
+						     phc_clock_info);
+	struct efx_nic *efx = ptp_data->efx;
+	MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
+	s64 adjustment_ns;
+	int rc;
+
+	if (delta > MAX_PPB)
+		delta = MAX_PPB;
+	else if (delta < -MAX_PPB)
+		delta = -MAX_PPB;
+
+	/* Convert ppb to fixed point ns taking care to round correctly. */
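+	/* Adding half of the final divisor (1 << (adjfreq_ppb_shift - 1))
+	 * before the right shift rounds the fixed-point result to nearest
+	 * rather than truncating it.
+	 */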
+	adjustment_ns = ((s64)delta * PPB_SCALE_WORD +
+			 (1 << (ptp_data->adjfreq_ppb_shift - 1))) >>
+			ptp_data->adjfreq_ppb_shift;
+
+	MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+	MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
+	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
+	MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
+			  NULL, 0, NULL);
+	if (rc != 0)
+		return rc;
+
+	ptp_data->current_adjfreq = adjustment_ns;
+	return 0;
+}
+
+static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	u32 nic_major, nic_minor;
+	struct efx_ptp_data *ptp_data = container_of(ptp,
+						     struct efx_ptp_data,
+						     phc_clock_info);
+	struct efx_nic *efx = ptp_data->efx;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
+
+	efx->ptp_data->ns_to_nic_time(delta, &nic_major, &nic_minor);
+
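+	/* The ADJUST request carries a frequency adjustment as well as a
+	 * time offset, so the currently programmed frequency value is
+	 * passed back alongside the new offset.
+	 */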
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+	MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MAJOR, nic_major);
+	MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_MINOR, nic_minor);
+	return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			    NULL, 0, NULL);
+}
+
+static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct efx_ptp_data *ptp_data = container_of(ptp,
+						     struct efx_ptp_data,
+						     phc_clock_info);
+	struct efx_nic *efx = ptp_data->efx;
+	MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+	MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
+	int rc;
+	ktime_t kt;
+
+	MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+	MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+
+	rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
+			  outbuf, sizeof(outbuf), NULL);
+	if (rc != 0)
+		return rc;
+
+	kt = ptp_data->nic_to_kernel_time(
+		MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MAJOR),
+		MCDI_DWORD(outbuf, PTP_OUT_READ_NIC_TIME_MINOR), 0);
+	*ts = ktime_to_timespec64(kt);
+	return 0;
+}
+
+static int efx_phc_settime(struct ptp_clock_info *ptp,
+			   const struct timespec64 *e_ts)
+{
+	/* Get the current NIC time, efx_phc_gettime.
+	 * Subtract from the desired time to get the offset
+	 * call efx_phc_adjtime with the offset
+	 */
+	int rc;
+	struct timespec64 time_now;
+	struct timespec64 delta;
+
+	rc = efx_phc_gettime(ptp, &time_now);
+	if (rc != 0)
+		return rc;
+
+	delta = timespec64_sub(*e_ts, time_now);
+
+	rc = efx_phc_adjtime(ptp, timespec64_to_ns(&delta));
+	if (rc != 0)
+		return rc;
+
+	return 0;
+}
+
+static int efx_phc_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *request,
+			  int enable)
+{
+	struct efx_ptp_data *ptp_data = container_of(ptp,
+						     struct efx_ptp_data,
+						     phc_clock_info);
+	if (request->type != PTP_CLK_REQ_PPS)
+		return -EOPNOTSUPP;
+
+	ptp_data->nic_ts_enabled = !!enable;
+	return 0;
+}
+
+static const struct efx_channel_type efx_ptp_channel_type = {
+	.handle_no_channel	= efx_ptp_handle_no_channel,
+	.pre_probe		= efx_ptp_probe_channel,
+	.post_remove		= efx_ptp_remove_channel,
+	.get_name		= efx_ptp_get_channel_name,
+	/* no copy operation; there is no need to reallocate this channel */
+	.receive_skb		= efx_ptp_rx,
+	.want_txqs		= efx_ptp_want_txqs,
+	.keep_eventq		= false,
+};
+
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx)
+{
+	/* Check whether PTP is implemented on this NIC.  The DISABLE
+	 * operation will succeed if and only if it is implemented.
+	 */
+	if (efx_ptp_disable(efx) == 0)
+		efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
+			&efx_ptp_channel_type;
+}
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+	if (efx_ptp_restart(efx))
+		netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+	/* re-enable timestamping if it was previously enabled */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, true, true);
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+	/* temporarily disable timestamping */
+	if (efx->type->ptp_set_ts_sync_events)
+		efx->type->ptp_set_ts_sync_events(efx, false, true);
+	efx_ptp_stop(efx);
+}
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
new file mode 100644
index 000000000000..9855e8c9e544
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ * Copyright 2019-2020 Xilinx Inc.
+ */
+
+#ifndef EFX_PTP_H
+#define EFX_PTP_H
+
+#include <linux/net_tstamp.h>
+#include "net_driver.h"
+
+struct ethtool_ts_info;
+int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
+void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
+struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
+void efx_ptp_remove(struct efx_nic *efx);
+int efx_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+int efx_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+int efx_ptp_get_mode(struct efx_nic *efx);
+int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
+			unsigned int new_mode);
+int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
+void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+size_t efx_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
+size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats);
+void efx_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
+void __efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+				   struct sk_buff *skb);
+static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
+					       struct sk_buff *skb)
+{
+	if (channel->sync_events_state == SYNC_EVENTS_VALID)
+		__efx_rx_skb_attach_timestamp(channel, skb);
+}
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
+bool efx_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
+ktime_t efx_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
+
+#endif /* EFX_PTP_H */
diff --git a/drivers/net/ethernet/sfc/siena/rx.c b/drivers/net/ethernet/sfc/siena/rx.c
new file mode 100644
index 000000000000..2375cef577e4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/prefetch.h>
+#include <linux/moduleparam.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/xdp.h>
+#include <linux/bpf_trace.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "rx_common.h"
+#include "filter.h"
+#include "nic.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Maximum rx prefix used by any architecture. */
+#define EFX_MAX_RX_PREFIX_SIZE 16
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS  128u
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+				      EFX_RX_USR_BUF_SIZE)
+
+static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+				     struct efx_rx_buffer *rx_buf,
+				     int len)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+	if (likely(len <= max_len))
+		return;
+
+	/* The packet must be discarded, but this is only a fatal error
+	 * if the caller indicated it was
+	 */
+	rx_buf->flags |= EFX_RX_PKT_DISCARD;
+
+	if (net_ratelimit())
+		netif_err(efx, rx_err, efx->net_dev,
+			  "RX queue %d overlength RX event (%#x > %#x)\n",
+			  efx_rx_queue_index(rx_queue), len, max_len);
+
+	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
+}
+
+/* Allocate and construct an SKB around page fragments */
+static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
+				     struct efx_rx_buffer *rx_buf,
+				     unsigned int n_frags,
+				     u8 *eh, int hdr_len)
+{
+	struct efx_nic *efx = channel->efx;
+	struct sk_buff *skb;
+
+	/* Allocate an SKB to store the headers */
+	skb = netdev_alloc_skb(efx->net_dev,
+			       efx->rx_ip_align + efx->rx_prefix_size +
+			       hdr_len);
+	if (unlikely(skb == NULL)) {
+		atomic_inc(&efx->n_rx_noskb_drops);
+		return NULL;
+	}
+
+	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);
+
+	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
+	       efx->rx_prefix_size + hdr_len);
+	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
+	__skb_put(skb, hdr_len);
+
+	/* Append the remaining page(s) onto the frag list */
+	if (rx_buf->len > hdr_len) {
+		rx_buf->page_offset += hdr_len;
+		rx_buf->len -= hdr_len;
+
+		for (;;) {
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					rx_buf->page, rx_buf->page_offset,
+					rx_buf->len, efx->rx_buffer_truesize);
+			rx_buf->page = NULL;
+
+			if (skb_shinfo(skb)->nr_frags == n_frags)
+				break;
+
+			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+		}
+	} else {
+		__free_pages(rx_buf->page, efx->rx_buffer_order);
+		rx_buf->page = NULL;
+		n_frags = 0;
+	}
+
+	/* Move past the ethernet header */
+	skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+	skb_mark_napi_id(skb, &channel->napi_str);
+
+	return skb;
+}
+
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+		   unsigned int n_frags, unsigned int len, u16 flags)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+	struct efx_rx_buffer *rx_buf;
+
+	rx_queue->rx_packets++;
+
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	rx_buf->flags |= flags;
+
+	/* Validate the number of fragments and completed length */
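+	/* For a scattered packet the completed length must lie in the range
+	 * ((n_frags - 1) * rx_dma_len, n_frags * rx_dma_len] and scatter
+	 * must be enabled; otherwise the buffer is marked for discard.
+	 */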
+	if (n_frags == 1) {
+		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+			efx_rx_packet__check_len(rx_queue, rx_buf, len);
+	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
+		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+		   unlikely(len > n_frags * efx->rx_dma_len) ||
+		   unlikely(!efx->rx_scatter)) {
+		/* If this isn't an explicit discard request, either
+		 * the hardware or the driver is broken.
+		 */
+		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
+		rx_buf->flags |= EFX_RX_PKT_DISCARD;
+	}
+
+	netif_vdbg(efx, rx_status, efx->net_dev,
+		   "RX queue %d received ids %x-%x len %d %s%s\n",
+		   efx_rx_queue_index(rx_queue), index,
+		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
+		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
+		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
+
+	/* Discard packet, if instructed to do so.  Process the
+	 * previous receive first.
+	 */
+	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
+		efx_rx_flush_packet(channel);
+		efx_discard_rx_packet(channel, rx_buf, n_frags);
+		return;
+	}
+
+	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
+		rx_buf->len = len;
+
+	/* Release and/or sync the DMA mapping - assumes all RX buffers
+	 * consumed in-order per RX queue.
+	 */
+	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+
+	/* Prefetch nice and early so data will (hopefully) be in cache by
+	 * the time we look at it.
+	 */
+	prefetch(efx_rx_buf_va(rx_buf));
+
+	rx_buf->page_offset += efx->rx_prefix_size;
+	rx_buf->len -= efx->rx_prefix_size;
+
+	if (n_frags > 1) {
+		/* Release/sync DMA mapping for additional fragments.
+		 * Fix length for last fragment.
+		 */
+		unsigned int tail_frags = n_frags - 1;
+
+		for (;;) {
+			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+			if (--tail_frags == 0)
+				break;
+			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
+		}
+		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
+		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
+	}
+
+	/* All fragments have been DMA-synced, so recycle pages. */
+	rx_buf = efx_rx_buffer(rx_queue, index);
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+	/* Pipeline receives so that we give time for packet headers to be
+	 * prefetched into cache.
+	 */
+	efx_rx_flush_packet(channel);
+	channel->rx_pkt_n_frags = n_frags;
+	channel->rx_pkt_index = index;
+}
+
+static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
+			   struct efx_rx_buffer *rx_buf,
+			   unsigned int n_frags)
+{
+	struct sk_buff *skb;
+	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
+
+	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
+	if (unlikely(skb == NULL)) {
+		struct efx_rx_queue *rx_queue;
+
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+		return;
+	}
+	skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+	/* Set the SKB flags */
+	skb_checksum_none_assert(skb);
+	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+	}
+
+	efx_rx_skb_attach_timestamp(channel, skb);
+
+	if (channel->type->receive_skb)
+		if (channel->type->receive_skb(channel, skb))
+			return;
+
+	/* Pass the packet up */
+	if (channel->rx_list != NULL)
+		/* Add to list, will pass up later */
+		list_add_tail(&skb->list, channel->rx_list);
+	else
+		/* No list, so pass it up now */
+		netif_receive_skb(skb);
+}
+
+/* efx_do_xdp - perform XDP processing on a received packet.
+ *
+ * Returns true if the packet should still be delivered.
+ */
+static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
+		       struct efx_rx_buffer *rx_buf, u8 **ehp)
+{
+	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
+	struct efx_rx_queue *rx_queue;
+	struct bpf_prog *xdp_prog;
+	struct xdp_frame *xdpf;
+	struct xdp_buff xdp;
+	u32 xdp_act;
+	s16 offset;
+	int err;
+
+	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
+	if (!xdp_prog)
+		return true;
+
+	rx_queue = efx_channel_get_rx_queue(channel);
+
+	if (unlikely(channel->rx_pkt_n_frags > 1)) {
+		/* We can't do XDP on fragmented packets - drop. */
+		efx_free_rx_buffers(rx_queue, rx_buf,
+				    channel->rx_pkt_n_frags);
+		if (net_ratelimit())
+			netif_err(efx, rx_err, efx->net_dev,
+				  "XDP is not possible with multiple receive fragments (%d)\n",
+				  channel->rx_pkt_n_frags);
+		channel->n_rx_xdp_bad_drops++;
+		return false;
+	}
+
+	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
+				rx_buf->len, DMA_FROM_DEVICE);
+
+	/* Save the rx prefix. */
+	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
+	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
+	       efx->rx_prefix_size);
+
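+	/* The XDP program may move the start of packet (e.g. with
+	 * bpf_xdp_adjust_head()), so the hardware RX prefix saved above is
+	 * copied back in front of the new packet start on XDP_PASS.
+	 */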
+	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
+	/* No support yet for XDP metadata */
+	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
+			 rx_buf->len, false);
+
+	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	offset = (u8 *)xdp.data - *ehp;
+
+	switch (xdp_act) {
+	case XDP_PASS:
+		/* Fix up rx prefix. */
+		if (offset) {
+			*ehp += offset;
+			rx_buf->page_offset += offset;
+			rx_buf->len -= offset;
+			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
+			       efx->rx_prefix_size);
+		}
+		break;
+
+	case XDP_TX:
+		/* Buffer ownership passes to tx on success. */
+		xdpf = xdp_convert_buff_to_frame(&xdp);
+		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
+		if (unlikely(err != 1)) {
+			efx_free_rx_buffers(rx_queue, rx_buf, 1);
+			if (net_ratelimit())
+				netif_err(efx, rx_err, efx->net_dev,
+					  "XDP TX failed (%d)\n", err);
+			channel->n_rx_xdp_bad_drops++;
+			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+		} else {
+			channel->n_rx_xdp_tx++;
+		}
+		break;
+
+	case XDP_REDIRECT:
+		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
+		if (unlikely(err)) {
+			efx_free_rx_buffers(rx_queue, rx_buf, 1);
+			if (net_ratelimit())
+				netif_err(efx, rx_err, efx->net_dev,
+					  "XDP redirect failed (%d)\n", err);
+			channel->n_rx_xdp_bad_drops++;
+			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+		} else {
+			channel->n_rx_xdp_redirect++;
+		}
+		break;
+
+	default:
+		bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
+		efx_free_rx_buffers(rx_queue, rx_buf, 1);
+		channel->n_rx_xdp_bad_drops++;
+		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+		break;
+
+	case XDP_ABORTED:
+		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
+		fallthrough;
+	case XDP_DROP:
+		efx_free_rx_buffers(rx_queue, rx_buf, 1);
+		channel->n_rx_xdp_drops++;
+		break;
+	}
+
+	return xdp_act == XDP_PASS;
+}
+
+/* Handle a received packet.  Second half: Touches packet payload. */
+void __efx_rx_packet(struct efx_channel *channel)
+{
+	struct efx_nic *efx = channel->efx;
+	struct efx_rx_buffer *rx_buf =
+		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+	u8 *eh = efx_rx_buf_va(rx_buf);
+
+	/* Read length from the prefix if necessary.  This already
+	 * excludes the length of the prefix itself.
+	 */
+	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+		rx_buf->len = le16_to_cpup((__le16 *)
+					   (eh + efx->rx_packet_len_offset));
+
+	/* If we're in a loopback test, then pass the packet directly to the
+	 * loopback layer and free the rx_buf here.
+	 */
+	if (unlikely(efx->loopback_selftest)) {
+		struct efx_rx_queue *rx_queue;
+
+		efx_loopback_rx_packet(efx, eh, rx_buf->len);
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf,
+				    channel->rx_pkt_n_frags);
+		goto out;
+	}
+
+	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
+		goto out;
+
+	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
+
+	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, 0);
+	else
+		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
+out:
+	channel->rx_pkt_n_frags = 0;
+}
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.c b/drivers/net/ethernet/sfc/siena/rx_common.c
new file mode 100644
index 000000000000..1b22c7be0088
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx_common.c
@@ -0,0 +1,1083 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include <linux/module.h>
+#include <linux/iommu.h>
+#include "efx.h"
+#include "nic.h"
+#include "rx_common.h"
+
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold;
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+		 "RX descriptor ring refill threshold (%)");
+
+/* RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_page_state *state;
+	unsigned int index;
+	struct page *page;
+
+	if (unlikely(!rx_queue->page_ring))
+		return NULL;
+	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+	page = rx_queue->page_ring[index];
+	if (page == NULL)
+		return NULL;
+
+	rx_queue->page_ring[index] = NULL;
+	/* page_remove cannot exceed page_add. */
+	if (rx_queue->page_remove != rx_queue->page_add)
+		++rx_queue->page_remove;
+
+	/* If page_count is 1 then we hold the only reference to this page. */
+	if (page_count(page) == 1) {
+		++rx_queue->page_recycle_count;
+		return page;
+	} else {
+		state = page_address(page);
+		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+		put_page(page);
+		++rx_queue->page_recycle_failed;
+	}
+
+	return NULL;
+}
+
+/* Attempt to recycle the page if there is an RX recycle ring; the page can
+ * only be added if this is the final RX buffer, to prevent pages being used in
+ * the descriptor ring and appearing in the recycle ring simultaneously.
+ */
+static void efx_recycle_rx_page(struct efx_channel *channel,
+				struct efx_rx_buffer *rx_buf)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+	struct efx_nic *efx = rx_queue->efx;
+	struct page *page = rx_buf->page;
+	unsigned int index;
+
+	/* Only recycle the page after processing the final buffer. */
+	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
+		return;
+
+	index = rx_queue->page_add & rx_queue->page_ptr_mask;
+	if (rx_queue->page_ring[index] == NULL) {
+		unsigned int read_index = rx_queue->page_remove &
+			rx_queue->page_ptr_mask;
+
+		/* The next slot in the recycle ring is available, but
+		 * increment page_remove if the read pointer currently
+		 * points here.
+		 */
+		if (read_index == index)
+			++rx_queue->page_remove;
+		rx_queue->page_ring[index] = page;
+		++rx_queue->page_add;
+		return;
+	}
+	++rx_queue->page_recycle_full;
+	efx_unmap_rx_buffer(efx, rx_buf);
+	put_page(rx_buf->page);
+}
+
+/* Recycle the pages that are used by buffers that have just been received. */
+void efx_recycle_rx_pages(struct efx_channel *channel,
+			  struct efx_rx_buffer *rx_buf,
+			  unsigned int n_frags)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+	if (unlikely(!rx_queue->page_ring))
+		return;
+
+	do {
+		efx_recycle_rx_page(channel, rx_buf);
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--n_frags);
+}
+
+void efx_discard_rx_packet(struct efx_channel *channel,
+			   struct efx_rx_buffer *rx_buf,
+			   unsigned int n_frags)
+{
+	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
+
+	efx_recycle_rx_pages(channel, rx_buf, n_frags);
+
+	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+}
+
+static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+	unsigned int bufs_in_recycle_ring, page_ring_size;
+	struct efx_nic *efx = rx_queue->efx;
+
+	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
+	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
+					    efx->rx_bufs_per_page);
+	rx_queue->page_ring = kcalloc(page_ring_size,
+				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
+	if (!rx_queue->page_ring)
+		rx_queue->page_ptr_mask = 0;
+	else
+		rx_queue->page_ptr_mask = page_ring_size - 1;
+}
+
+static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	int i;
+
+	/* Unmap and release the pages in the recycle ring. Remove the ring. */
+	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
+		struct page *page = rx_queue->page_ring[i];
+		struct efx_rx_page_state *state;
+
+		if (page == NULL)
+			continue;
+
+		state = page_address(page);
+		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+		put_page(page);
+	}
+	kfree(rx_queue->page_ring);
+	rx_queue->page_ring = NULL;
+}
+
+static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+			       struct efx_rx_buffer *rx_buf)
+{
+	/* Release the page reference we hold for the buffer. */
+	if (rx_buf->page)
+		put_page(rx_buf->page);
+
+	/* If this is the last buffer in a page, unmap and free it. */
+	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
+		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+		efx_free_rx_buffers(rx_queue, rx_buf, 1);
+	}
+	rx_buf->page = NULL;
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int entries;
+	int rc;
+
+	/* Create the smallest power-of-two aligned ring */
+	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	rx_queue->ptr_mask = entries - 1;
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating RX queue %d size %#x mask %#x\n",
+		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
+		  rx_queue->ptr_mask);
+
+	/* Allocate RX buffers */
+	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
+				   GFP_KERNEL);
+	if (!rx_queue->buffer)
+		return -ENOMEM;
+
+	rc = efx_nic_probe_rx(rx_queue);
+	if (rc) {
+		kfree(rx_queue->buffer);
+		rx_queue->buffer = NULL;
+	}
+
+	return rc;
+}
+
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+	unsigned int max_fill, trigger, max_trigger;
+	struct efx_nic *efx = rx_queue->efx;
+	int rc = 0;
+
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+	/* Initialise ptr fields */
+	rx_queue->added_count = 0;
+	rx_queue->notified_count = 0;
+	rx_queue->removed_count = 0;
+	rx_queue->min_fill = -1U;
+	efx_init_rx_recycle_ring(rx_queue);
+
+	rx_queue->page_remove = 0;
+	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
+	rx_queue->page_recycle_count = 0;
+	rx_queue->page_recycle_failed = 0;
+	rx_queue->page_recycle_full = 0;
+
+	/* Initialise limit fields */
+	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
+	max_trigger =
+		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+	if (rx_refill_threshold != 0) {
+		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+		if (trigger > max_trigger)
+			trigger = max_trigger;
+	} else {
+		trigger = max_trigger;
+	}
+
+	rx_queue->max_fill = max_fill;
+	rx_queue->fast_fill_trigger = trigger;
+	rx_queue->refill_enabled = true;
+
+	/* Initialise XDP queue information */
+	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
+			      rx_queue->core_index, 0);
+
+	if (rc) {
+		netif_err(efx, rx_err, efx->net_dev,
+			  "Failure to initialise XDP queue information rc=%d\n",
+			  rc);
+		efx->xdp_rxq_info_failed = true;
+	} else {
+		rx_queue->xdp_rxq_info_valid = true;
+	}
+
+	/* Set up RX descriptor ring */
+	efx_nic_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+	struct efx_rx_buffer *rx_buf;
+	int i;
+
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+	del_timer_sync(&rx_queue->slow_fill);
+
+	/* Release RX buffers from the current read ptr to the write ptr */
+	if (rx_queue->buffer) {
+		for (i = rx_queue->removed_count; i < rx_queue->added_count;
+		     i++) {
+			unsigned int index = i & rx_queue->ptr_mask;
+
+			rx_buf = efx_rx_buffer(rx_queue, index);
+			efx_fini_rx_buffer(rx_queue, rx_buf);
+		}
+	}
+
+	efx_fini_rx_recycle_ring(rx_queue);
+
+	if (rx_queue->xdp_rxq_info_valid)
+		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);
+
+	rx_queue->xdp_rxq_info_valid = false;
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
+		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
+
+	efx_nic_remove_rx(rx_queue);
+
+	kfree(rx_queue->buffer);
+	rx_queue->buffer = NULL;
+}
+
+/* Unmap a DMA-mapped page.  This function is only called for the final RX
+ * buffer in a page.
+ */
+void efx_unmap_rx_buffer(struct efx_nic *efx,
+			 struct efx_rx_buffer *rx_buf)
+{
+	struct page *page = rx_buf->page;
+
+	if (page) {
+		struct efx_rx_page_state *state = page_address(page);
+
+		dma_unmap_page(&efx->pci_dev->dev,
+			       state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+	}
+}
+
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+			 struct efx_rx_buffer *rx_buf,
+			 unsigned int num_bufs)
+{
+	do {
+		if (rx_buf->page) {
+			put_page(rx_buf->page);
+			rx_buf->page = NULL;
+		}
+		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+	} while (--num_bufs);
+}
+
+void efx_rx_slow_fill(struct timer_list *t)
+{
+	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);
+
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(rx_queue);
+	++rx_queue->slow_fill_count;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
+{
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
+}
+
+/* efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
+ *
+ * @rx_queue:		Efx RX queue
+ * @atomic:		if true, use atomic (GFP_ATOMIC) page allocations
+ *
+ * This allocates a batch of pages, maps them for DMA, and populates a
+ * struct efx_rx_buffer for each one.  Returns a negative error code or
+ * 0 on success.  If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
+ */
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
+{
+	unsigned int page_offset, index, count;
+	struct efx_nic *efx = rx_queue->efx;
+	struct efx_rx_page_state *state;
+	struct efx_rx_buffer *rx_buf;
+	dma_addr_t dma_addr;
+	struct page *page;
+
+	count = 0;
+	do {
+		page = efx_reuse_page(rx_queue);
+		if (page == NULL) {
+			page = alloc_pages(__GFP_COMP |
+					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
+					   efx->rx_buffer_order);
+			if (unlikely(page == NULL))
+				return -ENOMEM;
+			dma_addr =
+				dma_map_page(&efx->pci_dev->dev, page, 0,
+					     PAGE_SIZE << efx->rx_buffer_order,
+					     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+						       dma_addr))) {
+				__free_pages(page, efx->rx_buffer_order);
+				return -EIO;
+			}
+			state = page_address(page);
+			state->dma_addr = dma_addr;
+		} else {
+			state = page_address(page);
+			dma_addr = state->dma_addr;
+		}
+
+		dma_addr += sizeof(struct efx_rx_page_state);
+		page_offset = sizeof(struct efx_rx_page_state);
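+		/* Each page begins with a struct efx_rx_page_state holding
+		 * the DMA mapping, followed by rx_bufs_per_page buffers of
+		 * rx_page_buf_step bytes each; the loop below carves the
+		 * page into those buffers.
+		 */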
+
+		do {
+			index = rx_queue->added_count & rx_queue->ptr_mask;
+			rx_buf = efx_rx_buffer(rx_queue, index);
+			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
+					   EFX_XDP_HEADROOM;
+			rx_buf->page = page;
+			rx_buf->page_offset = page_offset + efx->rx_ip_align +
+					      EFX_XDP_HEADROOM;
+			rx_buf->len = efx->rx_dma_len;
+			rx_buf->flags = 0;
+			++rx_queue->added_count;
+			get_page(page);
+			dma_addr += efx->rx_page_buf_step;
+			page_offset += efx->rx_page_buf_step;
+		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
+
+		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
+	} while (++count < efx->rx_pages_per_batch);
+
+	return 0;
+}
+
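+/* Work out how each receive page is split up: rx_page_buf_step is the
+ * aligned footprint of one buffer (DMA area plus IP alignment and XDP
+ * head/tail room), rx_bufs_per_page is how many such buffers fit after
+ * the struct efx_rx_page_state header (1 when higher-order pages are in
+ * use), and rx_buffer_truesize is the share of the page charged to each
+ * buffer.
+ */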
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
+				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
+				      EFX_RX_BUF_ALIGNMENT);
+	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+		efx->rx_page_buf_step);
+	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+		efx->rx_bufs_per_page;
+	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+					       efx->rx_bufs_per_page);
+}
+
+/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue:		RX descriptor queue
+ * @atomic:		if true, allocate pages with GFP_ATOMIC
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->max_fill.  If there is insufficient atomic
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here).  In practice,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
+{
+	struct efx_nic *efx = rx_queue->efx;
+	unsigned int fill_level, batch_size;
+	int space, rc = 0;
+
+	if (!rx_queue->refill_enabled)
+		return;
+
+	/* Calculate current fill level, and exit if we don't need to fill */
+	fill_level = (rx_queue->added_count - rx_queue->removed_count);
+	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
+	if (fill_level >= rx_queue->fast_fill_trigger)
+		goto out;
+
+	/* Record minimum fill level */
+	if (unlikely(fill_level < rx_queue->min_fill)) {
+		if (fill_level)
+			rx_queue->min_fill = fill_level;
+	}
+
+	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
+	space = rx_queue->max_fill - fill_level;
+	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);
+
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filling descriptor ring from"
+		   " level %d to level %d\n",
+		   efx_rx_queue_index(rx_queue), fill_level,
+		   rx_queue->max_fill);
+
+	do {
+		rc = efx_init_rx_buffers(rx_queue, atomic);
+		if (unlikely(rc)) {
+			/* Ensure that we don't leave the rx queue empty */
+			efx_schedule_slow_fill(rx_queue);
+			goto out;
+		}
+	} while ((space -= batch_size) >= batch_size);
+
+	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
+		   "RX queue %d fast-filled descriptor ring "
+		   "to level %d\n", efx_rx_queue_index(rx_queue),
+		   rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+	if (rx_queue->notified_count != rx_queue->added_count)
+		efx_nic_notify_rx_desc(rx_queue);
+}
+
+/* Pass a received packet up through GRO.  GRO can handle pages
+ * regardless of checksum state and skbs with a good checksum.
+ */
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+		  unsigned int n_frags, u8 *eh, __wsum csum)
+{
+	struct napi_struct *napi = &channel->napi_str;
+	struct efx_nic *efx = channel->efx;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(napi);
+	if (unlikely(!skb)) {
+		struct efx_rx_queue *rx_queue;
+
+		rx_queue = efx_channel_get_rx_queue(channel);
+		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
+		return;
+	}
+
+	if (efx->net_dev->features & NETIF_F_RXHASH &&
+	    efx_rx_buf_hash_valid(efx, eh))
+		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
+			     PKT_HASH_TYPE_L3);
+	if (csum) {
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	} else {
+		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
+				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
+	}
+	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
+
+	for (;;) {
+		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+				   rx_buf->page, rx_buf->page_offset,
+				   rx_buf->len);
+		rx_buf->page = NULL;
+		skb->len += rx_buf->len;
+		if (skb_shinfo(skb)->nr_frags == n_frags)
+			break;
+
+		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
+	}
+
+	skb->data_len = skb->len;
+	skb->truesize += n_frags * efx->rx_buffer_truesize;
+
+	skb_record_rx_queue(skb, channel->rx_queue.core_index);
+
+	napi_gro_frags(napi);
+}
+
+/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
+ * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
+ */
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+{
+	struct list_head *head = &efx->rss_context.list;
+	struct efx_rss_context *ctx, *new;
+	u32 id = 1; /* Don't use zero, that refers to the master RSS context */
+
+	WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+	/* Search for first gap in the numbering */
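+	/* E.g. if user_ids 1, 2 and 4 are in use, the loop stops at the
+	 * entry with id 4 and the new context takes id 3; the list stays
+	 * sorted in ascending user_id order.
+	 */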
+	list_for_each_entry(ctx, head, list) {
+		if (ctx->user_id != id)
+			break;
+		id++;
+		/* Check for wrap.  If this happens, we have nearly 2^32
+		 * allocated RSS contexts, which seems unlikely.
+		 */
+		if (WARN_ON_ONCE(!id))
+			return NULL;
+	}
+
+	/* Create the new entry */
+	new = kmalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+	new->rx_hash_udp_4tuple = false;
+
+	/* Insert the new entry into the gap */
+	new->user_id = id;
+	list_add_tail(&new->list, &ctx->list);
+	return new;
+}
+
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
+{
+	struct list_head *head = &efx->rss_context.list;
+	struct efx_rss_context *ctx;
+
+	WARN_ON(!mutex_is_locked(&efx->rss_lock));
+
+	list_for_each_entry(ctx, head, list)
+		if (ctx->user_id == id)
+			return ctx;
+	return NULL;
+}
+
+void efx_free_rss_context_entry(struct efx_rss_context *ctx)
+{
+	list_del(&ctx->list);
+	kfree(ctx);
+}
+
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+				    struct efx_rss_context *ctx)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
+		ctx->rx_indir_table[i] =
+			ethtool_rxfh_indir_default(i, efx->rss_spread);
+}
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range.  Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+		return false;
+
+	if (spec->match_flags &
+	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+	    is_multicast_ether_addr(spec->loc_mac))
+		return true;
+
+	if ((spec->match_flags &
+	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+		if (spec->ether_type == htons(ETH_P_IP) &&
+		    ipv4_is_multicast(spec->loc_host[0]))
+			return true;
+		if (spec->ether_type == htons(ETH_P_IPV6) &&
+		    ((const u8 *)spec->loc_host)[0] == 0xff)
+			return true;
+	}
+
+	return false;
+}
+
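+/* Two filter specs are equal if their RX/TX flags and match flags agree
+ * and all match fields - everything from outer_vid to the end of
+ * struct efx_filter_spec - are identical.  efx_filter_spec_hash() below
+ * hashes that same region.
+ */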
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+			   const struct efx_filter_spec *right)
+{
+	if ((left->match_flags ^ right->match_flags) |
+	    ((left->flags ^ right->flags) &
+	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+		return false;
+
+	return memcmp(&left->outer_vid, &right->outer_vid,
+		      sizeof(struct efx_filter_spec) -
+		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+{
+	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+	return jhash2((const u32 *)&spec->outer_vid,
+		      (sizeof(struct efx_filter_spec) -
+		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
+		      0);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+			bool *force)
+{
+	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
+		/* ARFS is currently updating this entry, leave it */
+		return false;
+	}
+	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
+		/* ARFS tried and failed to update this, so it's probably out
+		 * of date.  Remove the filter and the ARFS rule entry.
+		 */
+		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
+		*force = true;
+		return true;
+	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
+		/* ARFS has moved on, so old filter is not needed.  Since we did
+		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
+		 * not be removed by efx_rps_hash_del() subsequently.
+		 */
+		*force = true;
+		return true;
+	}
+	/* Remove it iff ARFS wants to. */
+	return true;
+}
+
+static
+struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
+				       const struct efx_filter_spec *spec)
+{
+	u32 hash = efx_filter_spec_hash(spec);
+
+	lockdep_assert_held(&efx->rps_hash_lock);
+	if (!efx->rps_hash_table)
+		return NULL;
+	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
+}
+
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+					const struct efx_filter_spec *spec)
+{
+	struct efx_arfs_rule *rule;
+	struct hlist_head *head;
+	struct hlist_node *node;
+
+	head = efx_rps_hash_bucket(efx, spec);
+	if (!head)
+		return NULL;
+	hlist_for_each(node, head) {
+		rule = container_of(node, struct efx_arfs_rule, node);
+		if (efx_filter_spec_equal(spec, &rule->spec))
+			return rule;
+	}
+	return NULL;
+}
+
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+				       const struct efx_filter_spec *spec,
+				       bool *new)
+{
+	struct efx_arfs_rule *rule;
+	struct hlist_head *head;
+	struct hlist_node *node;
+
+	head = efx_rps_hash_bucket(efx, spec);
+	if (!head)
+		return NULL;
+	hlist_for_each(node, head) {
+		rule = container_of(node, struct efx_arfs_rule, node);
+		if (efx_filter_spec_equal(spec, &rule->spec)) {
+			*new = false;
+			return rule;
+		}
+	}
+	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
+	*new = true;
+	if (rule) {
+		memcpy(&rule->spec, spec, sizeof(rule->spec));
+		hlist_add_head(&rule->node, head);
+	}
+	return rule;
+}
+
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
+{
+	struct efx_arfs_rule *rule;
+	struct hlist_head *head;
+	struct hlist_node *node;
+
+	head = efx_rps_hash_bucket(efx, spec);
+	if (WARN_ON(!head))
+		return;
+	hlist_for_each(node, head) {
+		rule = container_of(node, struct efx_arfs_rule, node);
+		if (efx_filter_spec_equal(spec, &rule->spec)) {
+			/* Someone already reused the entry.  We know that if
+			 * this check doesn't fire (i.e. filter_id == REMOVING)
+			 * then the REMOVING mark was put there by our caller,
+			 * because caller is holding a lock on filter table and
+			 * only holders of that lock set REMOVING.
+			 */
+			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
+				return;
+			hlist_del(node);
+			kfree(rule);
+			return;
+		}
+	}
+	/* We didn't find it. */
+	WARN_ON(1);
+}
+#endif
+
+int efx_probe_filters(struct efx_nic *efx)
+{
+	int rc;
+
+	mutex_lock(&efx->mac_lock);
+	down_write(&efx->filter_sem);
+	rc = efx->type->filter_table_probe(efx);
+	if (rc)
+		goto out_unlock;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (efx->type->offload_features & NETIF_F_NTUPLE) {
+		struct efx_channel *channel;
+		int i, success = 1;
+
+		efx_for_each_channel(channel, efx) {
+			channel->rps_flow_id =
+				kcalloc(efx->type->max_rx_ip_filters,
+					sizeof(*channel->rps_flow_id),
+					GFP_KERNEL);
+			if (!channel->rps_flow_id)
+				success = 0;
+			else
+				for (i = 0;
+				     i < efx->type->max_rx_ip_filters;
+				     ++i)
+					channel->rps_flow_id[i] =
+						RPS_FLOW_ID_INVALID;
+			channel->rfs_expire_index = 0;
+			channel->rfs_filter_count = 0;
+		}
+
+		if (!success) {
+			efx_for_each_channel(channel, efx)
+				kfree(channel->rps_flow_id);
+			efx->type->filter_table_remove(efx);
+			rc = -ENOMEM;
+			goto out_unlock;
+		}
+	}
+#endif
+out_unlock:
+	up_write(&efx->filter_sem);
+	mutex_unlock(&efx->mac_lock);
+	return rc;
+}
+
+void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx) {
+		cancel_delayed_work_sync(&channel->filter_work);
+		kfree(channel->rps_flow_id);
+		channel->rps_flow_id = NULL;
+	}
+#endif
+	down_write(&efx->filter_sem);
+	efx->type->filter_table_remove(efx);
+	up_write(&efx->filter_sem);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+static void efx_filter_rfs_work(struct work_struct *data)
+{
+	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
+							      work);
+	struct efx_nic *efx = netdev_priv(req->net_dev);
+	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
+	int slot_idx = req - efx->rps_slot;
+	struct efx_arfs_rule *rule;
+	u16 arfs_id = 0;
+	int rc;
+
+	rc = efx->type->filter_insert(efx, &req->spec, true);
+	if (rc >= 0)
+		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
+		rc %= efx->type->max_rx_ip_filters;
+	if (efx->rps_hash_table) {
+		spin_lock_bh(&efx->rps_hash_lock);
+		rule = efx_rps_hash_find(efx, &req->spec);
+		/* The rule might have already gone, if someone else's request
+		 * for the same spec was already worked and then expired before
+		 * we got around to our work.  In that case we have nothing
+		 * tying us to an arfs_id, meaning that as soon as the filter
+		 * is considered for expiry it will be removed.
+		 */
+		if (rule) {
+			if (rc < 0)
+				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
+			else
+				rule->filter_id = rc;
+			arfs_id = rule->arfs_id;
+		}
+		spin_unlock_bh(&efx->rps_hash_lock);
+	}
+	if (rc >= 0) {
+		/* Remember this so we can check whether to expire the filter
+		 * later.
+		 */
+		mutex_lock(&efx->rps_mutex);
+		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
+			channel->rfs_filter_count++;
+		channel->rps_flow_id[rc] = req->flow_id;
+		mutex_unlock(&efx->rps_mutex);
+
+		if (req->spec.ether_type == htons(ETH_P_IP))
+			netif_info(efx, rx_status, efx->net_dev,
+				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
+				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+				   req->spec.rem_host, ntohs(req->spec.rem_port),
+				   req->spec.loc_host, ntohs(req->spec.loc_port),
+				   req->rxq_index, req->flow_id, rc, arfs_id);
+		else
+			netif_info(efx, rx_status, efx->net_dev,
+				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
+				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+				   req->spec.rem_host, ntohs(req->spec.rem_port),
+				   req->spec.loc_host, ntohs(req->spec.loc_port),
+				   req->rxq_index, req->flow_id, rc, arfs_id);
+		channel->n_rfs_succeeded++;
+	} else {
+		if (req->spec.ether_type == htons(ETH_P_IP))
+			netif_dbg(efx, rx_status, efx->net_dev,
+				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
+				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+				  req->spec.rem_host, ntohs(req->spec.rem_port),
+				  req->spec.loc_host, ntohs(req->spec.loc_port),
+				  req->rxq_index, req->flow_id, rc, arfs_id);
+		else
+			netif_dbg(efx, rx_status, efx->net_dev,
+				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
+				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
+				  req->spec.rem_host, ntohs(req->spec.rem_port),
+				  req->spec.loc_host, ntohs(req->spec.loc_port),
+				  req->rxq_index, req->flow_id, rc, arfs_id);
+		channel->n_rfs_failed++;
+		/* We're overloading the NIC's filter tables, so let's do a
+		 * chunk of extra expiry work.
+		 */
+		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
+						     100u));
+	}
+
+	/* Release references */
+	clear_bit(slot_idx, &efx->rps_slot_map);
+	dev_put(req->net_dev);
+}
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+		   u16 rxq_index, u32 flow_id)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_async_filter_insertion *req;
+	struct efx_arfs_rule *rule;
+	struct flow_keys fk;
+	int slot_idx;
+	bool new;
+	int rc;
+
+	/* find a free slot */
+	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
+		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
+			break;
+	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
+		return -EBUSY;
+
+	if (flow_id == RPS_FLOW_ID_INVALID) {
+		rc = -EINVAL;
+		goto out_clear;
+	}
+
+	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
+		rc = -EPROTONOSUPPORT;
+		goto out_clear;
+	}
+
+	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
+		rc = -EPROTONOSUPPORT;
+		goto out_clear;
+	}
+	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
+		rc = -EPROTONOSUPPORT;
+		goto out_clear;
+	}
+
+	req = efx->rps_slot + slot_idx;
+	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
+			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+			   rxq_index);
+	req->spec.match_flags =
+		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+	req->spec.ether_type = fk.basic.n_proto;
+	req->spec.ip_proto = fk.basic.ip_proto;
+
+	if (fk.basic.n_proto == htons(ETH_P_IP)) {
+		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
+		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
+	} else {
+		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
+		       sizeof(struct in6_addr));
+		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
+		       sizeof(struct in6_addr));
+	}
+
+	req->spec.rem_port = fk.ports.src;
+	req->spec.loc_port = fk.ports.dst;
+
+	if (efx->rps_hash_table) {
+		/* Add it to ARFS hash table */
+		spin_lock(&efx->rps_hash_lock);
+		rule = efx_rps_hash_add(efx, &req->spec, &new);
+		if (!rule) {
+			rc = -ENOMEM;
+			goto out_unlock;
+		}
+		if (new)
+			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
+		rc = rule->arfs_id;
+		/* Skip if existing or pending filter already does the right thing */
+		if (!new && rule->rxq_index == rxq_index &&
+		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
+			goto out_unlock;
+		rule->rxq_index = rxq_index;
+		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
+		spin_unlock(&efx->rps_hash_lock);
+	} else {
+		/* Without an ARFS hash table, we just use arfs_id 0 for all
+		 * filters.  This means if multiple flows hash to the same
+		 * flow_id, all but the most recently touched will be eligible
+		 * for expiry.
+		 */
+		rc = 0;
+	}
+
+	/* Queue the request */
+	dev_hold(req->net_dev = net_dev);
+	INIT_WORK(&req->work, efx_filter_rfs_work);
+	req->rxq_index = rxq_index;
+	req->flow_id = flow_id;
+	schedule_work(&req->work);
+	return rc;
+out_unlock:
+	spin_unlock(&efx->rps_hash_lock);
+out_clear:
+	clear_bit(slot_idx, &efx->rps_slot_map);
+	return rc;
+}
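+
+/* Note on efx_filter_rfs(): the value handed back to the RPS core is the
+ * rule's arfs_id (or 0 when no hash table is in use), not the hardware
+ * filter ID, which only becomes known later in efx_filter_rfs_work().
+ * arfs_id is assigned modulo RPS_NO_FILTER, so a valid ID never equals the
+ * core's "no filter" sentinel.
+ */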
+
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
+{
+	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+	struct efx_nic *efx = channel->efx;
+	unsigned int index, size, start;
+	u32 flow_id;
+
+	if (!mutex_trylock(&efx->rps_mutex))
+		return false;
+	expire_one = efx->type->filter_rfs_expire_one;
+	index = channel->rfs_expire_index;
+	start = index;
+	size = efx->type->max_rx_ip_filters;
+	while (quota) {
+		flow_id = channel->rps_flow_id[index];
+
+		if (flow_id != RPS_FLOW_ID_INVALID) {
+			quota--;
+			if (expire_one(efx, flow_id, index)) {
+				netif_info(efx, rx_status, efx->net_dev,
+					   "expired filter %d [channel %u flow %u]\n",
+					   index, channel->channel, flow_id);
+				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+				channel->rfs_filter_count--;
+			}
+		}
+		if (++index == size)
+			index = 0;
+		/* If we were called with a quota that exceeds the total number
+		 * of filters in the table (which shouldn't happen, but could
+		 * if two callers race), ensure that we don't loop forever -
+		 * stop when we've examined every row of the table.
+		 */
+		if (index == start)
+			break;
+	}
+
+	channel->rfs_expire_index = index;
+	mutex_unlock(&efx->rps_mutex);
+	return true;
+}
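+
+/* Worked example for __efx_filter_rfs_expire(): with a quota of 100 and an
+ * illustrative max_rx_ip_filters of 4096, the scan starts at
+ * rfs_expire_index, only charges quota for occupied rows, wraps to 0 at the
+ * end of the table and gives up after one full lap, so the loop terminates
+ * even if the quota exceeds the table size.
+ */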
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/siena/rx_common.h b/drivers/net/ethernet/sfc/siena/rx_common.h
new file mode 100644
index 000000000000..fbd2769307f9
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/rx_common.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_COMMON_H
+#define EFX_RX_COMMON_H
+
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
+
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+				      EFX_RX_USR_BUF_SIZE)
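+
+/* For example, assuming an illustrative maximum frame length of ~9 KB and an
+ * EFX_RX_USR_BUF_SIZE of 2048 bytes (both values illustrative; the real
+ * constants live in net_driver.h), DIV_ROUND_UP() gives 5 fragments per
+ * packet.
+ */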
+
+/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_10G	256
+
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
+{
+	return page_address(buf->page) + buf->page_offset;
+}
+
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
+{
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
+#else
+	const u8 *data = eh + efx->rx_packet_hash_offset;
+
+	return (u32)data[0]	  |
+	       (u32)data[1] << 8  |
+	       (u32)data[2] << 16 |
+	       (u32)data[3] << 24;
+#endif
+}
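+
+/* Both branches of efx_rx_buf_hash() read the same 4-byte little-endian hash
+ * at rx_packet_hash_offset; the byte-by-byte assembly is only a fallback for
+ * architectures without efficient unaligned loads.
+ */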
+
+void efx_rx_slow_fill(struct timer_list *t);
+
+void efx_recycle_rx_pages(struct efx_channel *channel,
+			  struct efx_rx_buffer *rx_buf,
+			  unsigned int n_frags);
+void efx_discard_rx_packet(struct efx_channel *channel,
+			   struct efx_rx_buffer *rx_buf,
+			   unsigned int n_frags);
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_destroy_rx_queue(struct efx_rx_queue *rx_queue);
+
+void efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+			struct page *page,
+			unsigned int page_offset,
+			u16 flags);
+void efx_unmap_rx_buffer(struct efx_nic *efx, struct efx_rx_buffer *rx_buf);
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+				      struct efx_rx_buffer *rx_buf,
+				      unsigned int len)
+{
+	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+				DMA_FROM_DEVICE);
+}
+
+void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+			 struct efx_rx_buffer *rx_buf,
+			 unsigned int num_bufs);
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
+void efx_rx_config_page_split(struct efx_nic *efx);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic);
+
+void
+efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
+		  unsigned int n_frags, u8 *eh, __wsum csum);
+
+struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
+struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
+void efx_free_rss_context_entry(struct efx_rss_context *ctx);
+void efx_set_default_rx_indir_table(struct efx_nic *efx,
+				    struct efx_rss_context *ctx);
+
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
+bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+			   const struct efx_filter_spec *right);
+u32 efx_filter_spec_hash(const struct efx_filter_spec *spec);
+
+#ifdef CONFIG_RFS_ACCEL
+bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
+			bool *force);
+struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
+					const struct efx_filter_spec *spec);
+struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
+				       const struct efx_filter_spec *spec,
+				       bool *new);
+void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec);
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+		   u16 rxq_index, u32 flow_id);
+bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota);
+#endif
+
+int efx_probe_filters(struct efx_nic *efx);
+void efx_remove_filters(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/selftest.c b/drivers/net/ethernet/sfc/siena/selftest.c
new file mode 100644
index 000000000000..3c5227afd497
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/selftest.c
@@ -0,0 +1,807 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "efx_common.h"
+#include "efx_channels.h"
+#include "nic.h"
+#include "mcdi_port_common.h"
+#include "selftest.h"
+#include "workarounds.h"
+
+/* IRQ latency can be enormous because:
+ * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
+ *   slow serial console or an old IDE driver doing error recovery
+ * - The PREEMPT_RT patches mostly deal with this, but also allow a
+ *   tasklet or normal task to be given higher priority than our IRQ
+ *   threads
+ * Try to avoid blaming the hardware for this.
+ */
+#define IRQ_TIMEOUT HZ
+
+/*
+ * Loopback test packet structure
+ *
+ * The self-test should stress every RSS vector, and unfortunately
+ * Falcon only performs RSS on TCP/UDP packets.
+ */
+struct efx_loopback_payload {
+	struct ethhdr header;
+	struct iphdr ip;
+	struct udphdr udp;
+	__be16 iteration;
+	char msg[64];
+} __packed;
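+
+/* With __packed the test packet is exactly 14 + 20 + 8 + 2 + 64 = 108 bytes
+ * (Ethernet + IP + UDP headers, iteration counter, message).
+ */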
+
+/* Loopback test source MAC address */
+static const u8 payload_source[ETH_ALEN] __aligned(2) = {
+	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
+};
+
+static const char payload_msg[] =
+	"Hello world! This is an Efx loopback test in progress!";
+
+/* Interrupt mode names */
+static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX;
+static const char *const efx_interrupt_mode_names[] = {
+	[EFX_INT_MODE_MSIX]   = "MSI-X",
+	[EFX_INT_MODE_MSI]    = "MSI",
+	[EFX_INT_MODE_LEGACY] = "legacy",
+};
+#define INT_MODE(efx) \
+	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
+
+/**
+ * struct efx_loopback_state - persistent state during a loopback selftest
+ * @flush:		Drop all packets in efx_loopback_rx_packet
+ * @packet_count:	Number of packets being used in this test
+ * @skbs:		An array of skbs transmitted
+ * @offload_csum:	Checksums are being offloaded
+ * @rx_good:		RX good packet count
+ * @rx_bad:		RX bad packet count
+ * @payload:		Payload used in tests
+ */
+struct efx_loopback_state {
+	bool flush;
+	int packet_count;
+	struct sk_buff **skbs;
+	bool offload_csum;
+	atomic_t rx_good;
+	atomic_t rx_bad;
+	struct efx_loopback_payload payload;
+};
+
+/* How long to wait for all the packets to arrive (in ms) */
+#define LOOPBACK_TIMEOUT_MS 1000
+
+/**************************************************************************
+ *
+ * MII, NVRAM and register tests
+ *
+ **************************************************************************/
+
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc = 0;
+
+	rc = efx_mcdi_phy_test_alive(efx);
+	tests->phy_alive = rc ? -1 : 1;
+
+	return rc;
+}
+
+static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
+{
+	int rc = 0;
+
+	if (efx->type->test_nvram) {
+		rc = efx->type->test_nvram(efx);
+		if (rc == -EPERM)
+			rc = 0;
+		else
+			tests->nvram = rc ? -1 : 1;
+	}
+
+	return rc;
+}
+
+/**************************************************************************
+ *
+ * Interrupt and event queue testing
+ *
+ **************************************************************************/
+
+/* Test generation and receipt of interrupts */
+static int efx_test_interrupts(struct efx_nic *efx,
+			       struct efx_self_tests *tests)
+{
+	unsigned long timeout, wait;
+	int cpu;
+	int rc;
+
+	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
+	tests->interrupt = -1;
+
+	rc = efx_nic_irq_test_start(efx);
+	if (rc == -ENOTSUPP) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "direct interrupt testing not supported\n");
+		tests->interrupt = 0;
+		return 0;
+	}
+
+	timeout = jiffies + IRQ_TIMEOUT;
+	wait = 1;
+
+	/* Wait for arrival of test interrupt. */
+	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
+	do {
+		schedule_timeout_uninterruptible(wait);
+		cpu = efx_nic_irq_test_irq_cpu(efx);
+		if (cpu >= 0)
+			goto success;
+		wait *= 2;
+	} while (time_before(jiffies, timeout));
+
+	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
+	return -ETIMEDOUT;
+
+ success:
+	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
+		  INT_MODE(efx), cpu);
+	tests->interrupt = 1;
+	return 0;
+}
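+
+/* efx_test_interrupts() backs off exponentially (1, 2, 4, ... jiffies), so
+ * only around log2(HZ) wakeups occur before the one-second IRQ_TIMEOUT
+ * expires.
+ */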
+
+/* Test generation and receipt of interrupting events */
+static int efx_test_eventq_irq(struct efx_nic *efx,
+			       struct efx_self_tests *tests)
+{
+	struct efx_channel *channel;
+	unsigned int read_ptr[EFX_MAX_CHANNELS];
+	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
+	unsigned long timeout, wait;
+
+	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);
+
+	efx_for_each_channel(channel, efx) {
+		read_ptr[channel->channel] = channel->eventq_read_ptr;
+		set_bit(channel->channel, &dma_pend);
+		set_bit(channel->channel, &int_pend);
+		efx_nic_event_test_start(channel);
+	}
+
+	timeout = jiffies + IRQ_TIMEOUT;
+	wait = 1;
+
+	/* Wait for arrival of interrupts.  NAPI processing may or may
+	 * not complete in time, but we can cope in any case.
+	 */
+	do {
+		schedule_timeout_uninterruptible(wait);
+
+		efx_for_each_channel(channel, efx) {
+			efx_stop_eventq(channel);
+			if (channel->eventq_read_ptr !=
+			    read_ptr[channel->channel]) {
+				set_bit(channel->channel, &napi_ran);
+				clear_bit(channel->channel, &dma_pend);
+				clear_bit(channel->channel, &int_pend);
+			} else {
+				if (efx_nic_event_present(channel))
+					clear_bit(channel->channel, &dma_pend);
+				if (efx_nic_event_test_irq_cpu(channel) >= 0)
+					clear_bit(channel->channel, &int_pend);
+			}
+			efx_start_eventq(channel);
+		}
+
+		wait *= 2;
+	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));
+
+	efx_for_each_channel(channel, efx) {
+		bool dma_seen = !test_bit(channel->channel, &dma_pend);
+		bool int_seen = !test_bit(channel->channel, &int_pend);
+
+		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
+		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;
+
+		if (dma_seen && int_seen) {
+			netif_dbg(efx, drv, efx->net_dev,
+				  "channel %d event queue passed (with%s NAPI)\n",
+				  channel->channel,
+				  test_bit(channel->channel, &napi_ran) ?
+				  "" : "out");
+		} else {
+			/* Report failure and whether either interrupt or DMA
+			 * worked
+			 */
+			netif_err(efx, drv, efx->net_dev,
+				  "channel %d timed out waiting for event queue\n",
+				  channel->channel);
+			if (int_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d saw interrupt "
+					  "during event queue test\n",
+					  channel->channel);
+			if (dma_seen)
+				netif_err(efx, drv, efx->net_dev,
+					  "channel %d event was generated, but "
+					  "failed to trigger an interrupt\n",
+					  channel->channel);
+		}
+	}
+
+	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
+}
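+
+/* In efx_test_eventq_irq() the napi_ran/dma_pend/int_pend locals are used as
+ * per-channel bitmaps packed into a single unsigned long, which is what the
+ * BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG) above enforces.
+ */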
+
+static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
+			unsigned flags)
+{
+	int rc;
+
+	mutex_lock(&efx->mac_lock);
+	rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
+	mutex_unlock(&efx->mac_lock);
+	if (rc == -EPERM)
+		rc = 0;
+	else
+		netif_info(efx, drv, efx->net_dev,
+			   "%s phy selftest\n", rc ? "Failed" : "Passed");
+
+	return rc;
+}
+
+/**************************************************************************
+ *
+ * Loopback testing
+ * NB Only one loopback test can be executing concurrently.
+ *
+ **************************************************************************/
+
+/* Loopback test RX callback
+ * This is called for each received packet during loopback testing.
+ */
+void efx_loopback_rx_packet(struct efx_nic *efx,
+			    const char *buf_ptr, int pkt_len)
+{
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	struct efx_loopback_payload *received;
+	struct efx_loopback_payload *payload;
+
+	BUG_ON(!buf_ptr);
+
+	/* If we are just flushing, then drop the packet */
+	if ((state == NULL) || state->flush)
+		return;
+
+	payload = &state->payload;
+
+	received = (struct efx_loopback_payload *) buf_ptr;
+	received->ip.saddr = payload->ip.saddr;
+	if (state->offload_csum)
+		received->ip.check = payload->ip.check;
+
+	/* Check that header exists */
+	if (pkt_len < sizeof(received->header)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw runt RX packet (length %d) in %s loopback "
+			  "test\n", pkt_len, LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that the ethernet header exists */
+	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw non-loopback RX packet in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check packet length */
+	if (pkt_len != sizeof(*payload)) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw incorrect RX packet length %d (wanted %d) in "
+			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
+			  LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that IP header matches */
+	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw corrupted IP header in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that msg and padding matches */
+	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw corrupted RX packet in %s loopback test\n",
+			  LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Check that iteration matches */
+	if (received->iteration != payload->iteration) {
+		netif_err(efx, drv, efx->net_dev,
+			  "saw RX packet from iteration %d (wanted %d) in "
+			  "%s loopback test\n", ntohs(received->iteration),
+			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
+		goto err;
+	}
+
+	/* Increase correct RX count */
+	netif_vdbg(efx, drv, efx->net_dev,
+		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));
+
+	atomic_inc(&state->rx_good);
+	return;
+
+ err:
+#ifdef DEBUG
+	if (atomic_read(&state->rx_bad) == 0) {
+		netif_err(efx, drv, efx->net_dev, "received packet:\n");
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+			       buf_ptr, pkt_len, 0);
+		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
+		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
+			       &state->payload, sizeof(state->payload), 0);
+	}
+#endif
+	atomic_inc(&state->rx_bad);
+}
+
+/* Initialise an efx_selftest_state for a new iteration */
+static void efx_iterate_state(struct efx_nic *efx)
+{
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	struct net_device *net_dev = efx->net_dev;
+	struct efx_loopback_payload *payload = &state->payload;
+
+	/* Initialise the layer 2 (Ethernet) header */
+	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
+	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
+	payload->header.h_proto = htons(ETH_P_IP);
+
+	/* saddr set later and used as incrementing count */
+	payload->ip.daddr = htonl(INADDR_LOOPBACK);
+	payload->ip.ihl = 5;
+	payload->ip.check = (__force __sum16) htons(0xdead);
+	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
+	payload->ip.version = IPVERSION;
+	payload->ip.protocol = IPPROTO_UDP;
+
+	/* Initialise udp header */
+	payload->udp.source = 0;
+	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
+				 sizeof(struct iphdr));
+	payload->udp.check = 0;	/* checksum ignored */
+
+	/* Fill out payload */
+	payload->iteration = htons(ntohs(payload->iteration) + 1);
+	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));
+
+	/* Fill out remaining state members */
+	atomic_set(&state->rx_good, 0);
+	atomic_set(&state->rx_bad, 0);
+	smp_wmb();
+}
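+
+/* The iteration counter bumped in efx_iterate_state() is echoed back in every
+ * received test packet, so efx_loopback_rx_packet() can reject late packets
+ * left over from a previous burst instead of miscounting them as good.
+ */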
+
+static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	struct efx_loopback_payload *payload;
+	struct sk_buff *skb;
+	int i;
+	netdev_tx_t rc;
+
+	/* Transmit N copies of buffer */
+	for (i = 0; i < state->packet_count; i++) {
+		/* Allocate an skb, holding an extra reference for
+		 * transmit completion counting */
+		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+		state->skbs[i] = skb;
+		skb_get(skb);
+
+		/* Copy the payload in, incrementing the source address to
+		 * exercise the rss vectors */
+		payload = skb_put(skb, sizeof(state->payload));
+		memcpy(payload, &state->payload, sizeof(state->payload));
+		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));
+
+		/* Ensure everything we've written is visible to the
+		 * interrupt handler. */
+		smp_wmb();
+
+		netif_tx_lock_bh(efx->net_dev);
+		rc = efx_enqueue_skb(tx_queue, skb);
+		netif_tx_unlock_bh(efx->net_dev);
+
+		if (rc != NETDEV_TX_OK) {
+			netif_err(efx, drv, efx->net_dev,
+				  "TX queue %d could not transmit packet %d of "
+				  "%d in %s loopback test\n", tx_queue->label,
+				  i + 1, state->packet_count,
+				  LOOPBACK_MODE(efx));
+
+			/* Defer cleaning up the other skbs for the caller */
+			kfree_skb(skb);
+			return -EPIPE;
+		}
+	}
+
+	return 0;
+}
+
+static int efx_poll_loopback(struct efx_nic *efx)
+{
+	struct efx_loopback_state *state = efx->loopback_selftest;
+
+	return atomic_read(&state->rx_good) == state->packet_count;
+}
+
+static int efx_end_loopback(struct efx_tx_queue *tx_queue,
+			    struct efx_loopback_self_tests *lb_tests)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	struct sk_buff *skb;
+	int tx_done = 0, rx_good, rx_bad;
+	int i, rc = 0;
+
+	netif_tx_lock_bh(efx->net_dev);
+
+	/* Count the number of TX completions and decrement the refcnt. Any
+	 * skbs not already completed will be freed when the queue is flushed. */
+	for (i = 0; i < state->packet_count; i++) {
+		skb = state->skbs[i];
+		if (skb && !skb_shared(skb))
+			++tx_done;
+		dev_kfree_skb(skb);
+	}
+
+	netif_tx_unlock_bh(efx->net_dev);
+
+	/* Check TX completion and received packet counts */
+	rx_good = atomic_read(&state->rx_good);
+	rx_bad = atomic_read(&state->rx_bad);
+	if (tx_done != state->packet_count) {
+		/* Don't free the skbs; they will be picked up on TX
+		 * overflow or channel teardown.
+		 */
+		netif_err(efx, drv, efx->net_dev,
+			  "TX queue %d saw only %d out of an expected %d "
+			  "TX completion events in %s loopback test\n",
+			  tx_queue->label, tx_done, state->packet_count,
+			  LOOPBACK_MODE(efx));
+		rc = -ETIMEDOUT;
+		/* Allow to fall through so we see the RX errors as well */
+	}
+
+	/* We may always be up to a flush away from our desired packet total */
+	if (rx_good != state->packet_count) {
+		netif_dbg(efx, drv, efx->net_dev,
+			  "TX queue %d saw only %d out of an expected %d "
+			  "received packets in %s loopback test\n",
+			  tx_queue->label, rx_good, state->packet_count,
+			  LOOPBACK_MODE(efx));
+		rc = -ETIMEDOUT;
+		/* Fall through */
+	}
+
+	/* Update loopback test structure */
+	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
+	lb_tests->tx_done[tx_queue->label] += tx_done;
+	lb_tests->rx_good += rx_good;
+	lb_tests->rx_bad += rx_bad;
+
+	return rc;
+}
+
+static int
+efx_test_loopback(struct efx_tx_queue *tx_queue,
+		  struct efx_loopback_self_tests *lb_tests)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct efx_loopback_state *state = efx->loopback_selftest;
+	int i, begin_rc, end_rc;
+
+	for (i = 0; i < 3; i++) {
+		/* Determine how many packets to send */
+		state->packet_count = efx->txq_entries / 3;
+		state->packet_count = min(1 << (i << 2), state->packet_count);
+		state->skbs = kcalloc(state->packet_count,
+				      sizeof(state->skbs[0]), GFP_KERNEL);
+		if (!state->skbs)
+			return -ENOMEM;
+		state->flush = false;
+
+		netif_dbg(efx, drv, efx->net_dev,
+			  "TX queue %d (hw %d) testing %s loopback with %d packets\n",
+			  tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
+			  state->packet_count);
+
+		efx_iterate_state(efx);
+		begin_rc = efx_begin_loopback(tx_queue);
+
+		/* This will normally complete very quickly, but be
+		 * prepared to wait much longer. */
+		msleep(1);
+		if (!efx_poll_loopback(efx)) {
+			msleep(LOOPBACK_TIMEOUT_MS);
+			efx_poll_loopback(efx);
+		}
+
+		end_rc = efx_end_loopback(tx_queue, lb_tests);
+		kfree(state->skbs);
+
+		if (begin_rc || end_rc) {
+			/* Wait a while to ensure there are no packets
+			 * floating around after a failure. */
+			schedule_timeout_uninterruptible(HZ / 10);
+			return begin_rc ? begin_rc : end_rc;
+		}
+	}
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "TX queue %d passed %s loopback test with a burst length "
+		  "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
+		  state->packet_count);
+
+	return 0;
+}
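+
+/* Burst sizing in efx_test_loopback(): min(1 << (i << 2), txq_entries / 3)
+ * gives bursts of 1, 16 and 256 packets for i = 0, 1, 2, capped at a third
+ * of the TX ring.
+ */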
+
+/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
+ * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
+ * to delay and retry. Therefore, it's safer to just poll directly. Wait
+ * for link up and any faults to dissipate. */
+static int efx_wait_for_link(struct efx_nic *efx)
+{
+	struct efx_link_state *link_state = &efx->link_state;
+	int count, link_up_count = 0;
+	bool link_up;
+
+	for (count = 0; count < 40; count++) {
+		schedule_timeout_uninterruptible(HZ / 10);
+
+		if (efx->type->monitor != NULL) {
+			mutex_lock(&efx->mac_lock);
+			efx->type->monitor(efx);
+			mutex_unlock(&efx->mac_lock);
+		}
+
+		mutex_lock(&efx->mac_lock);
+		link_up = link_state->up;
+		if (link_up)
+			link_up = !efx->type->check_mac_fault(efx);
+		mutex_unlock(&efx->mac_lock);
+
+		if (link_up) {
+			if (++link_up_count == 2)
+				return 0;
+		} else {
+			link_up_count = 0;
+		}
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
+			      unsigned int loopback_modes)
+{
+	enum efx_loopback_mode mode;
+	struct efx_loopback_state *state;
+	struct efx_channel *channel =
+		efx_get_channel(efx, efx->tx_channel_offset);
+	struct efx_tx_queue *tx_queue;
+	int rc = 0;
+
+	/* Set the port loopback_selftest member. From this point on
+	 * all received packets will be dropped. Mark the state as
+	 * "flushing" so all inflight packets are dropped */
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (state == NULL)
+		return -ENOMEM;
+	BUG_ON(efx->loopback_selftest);
+	state->flush = true;
+	efx->loopback_selftest = state;
+
+	/* Test all supported loopback modes */
+	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
+		if (!(loopback_modes & (1 << mode)))
+			continue;
+
+		/* Move the port into the specified loopback mode. */
+		state->flush = true;
+		mutex_lock(&efx->mac_lock);
+		efx->loopback_mode = mode;
+		rc = __efx_reconfigure_port(efx);
+		mutex_unlock(&efx->mac_lock);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "unable to move into %s loopback\n",
+				  LOOPBACK_MODE(efx));
+			goto out;
+		}
+
+		rc = efx_wait_for_link(efx);
+		if (rc) {
+			netif_err(efx, drv, efx->net_dev,
+				  "loopback %s never came up\n",
+				  LOOPBACK_MODE(efx));
+			goto out;
+		}
+
+		/* Test all enabled types of TX queue */
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			state->offload_csum = (tx_queue->type &
+					       EFX_TXQ_TYPE_OUTER_CSUM);
+			rc = efx_test_loopback(tx_queue,
+					       &tests->loopback[mode]);
+			if (rc)
+				goto out;
+		}
+	}
+
+ out:
+	/* Remove the flush. The caller will remove the loopback setting */
+	state->flush = true;
+	efx->loopback_selftest = NULL;
+	wmb();
+	kfree(state);
+
+	if (rc == -EPERM)
+		rc = 0;
+
+	return rc;
+}
+
+/**************************************************************************
+ *
+ * Entry point
+ *
+ *************************************************************************/
+
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+		 unsigned flags)
+{
+	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+	int phy_mode = efx->phy_mode;
+	int rc_test = 0, rc_reset, rc;
+
+	efx_selftest_async_cancel(efx);
+
+	/* Online (i.e. non-disruptive) testing
+	 * This checks interrupt generation, event delivery and PHY presence. */
+
+	rc = efx_test_phy_alive(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	rc = efx_test_nvram(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	rc = efx_test_interrupts(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	rc = efx_test_eventq_irq(efx, tests);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	if (rc_test)
+		return rc_test;
+
+	if (!(flags & ETH_TEST_FL_OFFLINE))
+		return efx_test_phy(efx, tests, flags);
+
+	/* Offline (i.e. disruptive) testing
+	 * This checks MAC and PHY loopback on the specified port. */
+
+	/* Detach the device so the kernel doesn't transmit during the
+	 * loopback test and the watchdog timeout doesn't fire.
+	 */
+	efx_device_detach_sync(efx);
+
+	if (efx->type->test_chip) {
+		rc_reset = efx->type->test_chip(efx, tests);
+		if (rc_reset) {
+			netif_err(efx, hw, efx->net_dev,
+				  "Unable to recover from chip test\n");
+			efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+			return rc_reset;
+		}
+
+		if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
+			rc_test = -EIO;
+	}
+
+	/* Ensure that the phy is powered and out of loopback
+	 * for the bist and loopback tests */
+	mutex_lock(&efx->mac_lock);
+	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+	efx->loopback_mode = LOOPBACK_NONE;
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	rc = efx_test_phy(efx, tests, flags);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
+	if (rc && !rc_test)
+		rc_test = rc;
+
+	/* restore the PHY to the previous state */
+	mutex_lock(&efx->mac_lock);
+	efx->phy_mode = phy_mode;
+	efx->loopback_mode = loopback_mode;
+	__efx_reconfigure_port(efx);
+	mutex_unlock(&efx->mac_lock);
+
+	efx_device_attach_if_not_resetting(efx);
+
+	return rc_test;
+}
+
+void efx_selftest_async_start(struct efx_nic *efx)
+{
+	struct efx_channel *channel;
+
+	efx_for_each_channel(channel, efx)
+		efx_nic_event_test_start(channel);
+	schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
+}
+
+void efx_selftest_async_cancel(struct efx_nic *efx)
+{
+	cancel_delayed_work_sync(&efx->selftest_work);
+}
+
+static void efx_selftest_async_work(struct work_struct *data)
+{
+	struct efx_nic *efx = container_of(data, struct efx_nic,
+					   selftest_work.work);
+	struct efx_channel *channel;
+	int cpu;
+
+	efx_for_each_channel(channel, efx) {
+		cpu = efx_nic_event_test_irq_cpu(channel);
+		if (cpu < 0)
+			netif_err(efx, ifup, efx->net_dev,
+				  "channel %d failed to trigger an interrupt\n",
+				  channel->channel);
+		else
+			netif_dbg(efx, ifup, efx->net_dev,
+				  "channel %d triggered interrupt on CPU %d\n",
+				  channel->channel, cpu);
+	}
+}
+
+void efx_selftest_async_init(struct efx_nic *efx)
+{
+	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
+}
diff --git a/drivers/net/ethernet/sfc/siena/selftest.h b/drivers/net/ethernet/sfc/siena/selftest.h
new file mode 100644
index 000000000000..a23f085bf298
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/selftest.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2012 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_SELFTEST_H
+#define EFX_SELFTEST_H
+
+#include "net_driver.h"
+
+/*
+ * Self tests
+ */
+
+struct efx_loopback_self_tests {
+	int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
+	int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
+	int rx_good;
+	int rx_bad;
+};
+
+#define EFX_MAX_PHY_TESTS 20
+
+/* Efx self test results
+ * For fields which are not counters, 1 indicates success and -1
+ * indicates failure; 0 indicates test could not be run.
+ */
+struct efx_self_tests {
+	/* online tests */
+	int phy_alive;
+	int nvram;
+	int interrupt;
+	int eventq_dma[EFX_MAX_CHANNELS];
+	int eventq_int[EFX_MAX_CHANNELS];
+	/* offline tests */
+	int memory;
+	int registers;
+	int phy_ext[EFX_MAX_PHY_TESTS];
+	struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
+};
+
+void efx_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
+			    int pkt_len);
+int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
+		 unsigned flags);
+void efx_selftest_async_init(struct efx_nic *efx);
+void efx_selftest_async_start(struct efx_nic *efx);
+void efx_selftest_async_cancel(struct efx_nic *efx);
+
+#endif /* EFX_SELFTEST_H */
diff --git a/drivers/net/ethernet/sfc/siena/sriov.c b/drivers/net/ethernet/sfc/siena/sriov.c
new file mode 100644
index 000000000000..3f241e6c881a
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/sriov.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ */
+#include <linux/module.h>
+#include "net_driver.h"
+#include "nic.h"
+#include "sriov.h"
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_mac)
+		return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+			  u8 qos, __be16 vlan_proto)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_vlan) {
+		if ((vlan & ~VLAN_VID_MASK) ||
+		    (qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
+			return -EINVAL;
+
+		if (vlan_proto != htons(ETH_P_8021Q))
+			return -EPROTONOSUPPORT;
+
+		return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
+	} else {
+		return -EOPNOTSUPP;
+	}
+}
+
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+			      bool spoofchk)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_spoofchk)
+		return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+			    struct ifla_vf_info *ivi)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_get_vf_config)
+		return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
+	else
+		return -EOPNOTSUPP;
+}
+
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+				int link_state)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+
+	if (efx->type->sriov_set_vf_link_state)
+		return efx->type->sriov_set_vf_link_state(efx, vf_i,
+							  link_state);
+	else
+		return -EOPNOTSUPP;
+}
diff --git a/drivers/net/ethernet/sfc/siena/sriov.h b/drivers/net/ethernet/sfc/siena/sriov.h
new file mode 100644
index 000000000000..747707bee483
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/sriov.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2014-2015 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_SRIOV_H
+#define EFX_SRIOV_H
+
+#include "net_driver.h"
+
+#ifdef CONFIG_SFC_SRIOV
+
+int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac);
+int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
+			  u8 qos, __be16 vlan_proto);
+int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
+			      bool spoofchk);
+int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
+			    struct ifla_vf_info *ivi);
+int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
+				int link_state);
+#endif /* CONFIG_SFC_SRIOV */
+
+#endif /* EFX_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/siena/tx.c b/drivers/net/ethernet/sfc/siena/tx.c
new file mode 100644
index 000000000000..d16e031e95f4
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2013 Solarflare Communications Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include <linux/cache.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "io.h"
+#include "nic.h"
+#include "tx.h"
+#include "tx_common.h"
+#include "workarounds.h"
+#include "ef10_regs.h"
+
+#ifdef EFX_USE_PIO
+
+#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
+unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
+
+#endif /* EFX_USE_PIO */
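+
+/* EFX_PIOBUF_SIZE_DEF aligns 256 up to the L1 cache line size, so on a
+ * typical 64-byte-cache-line system efx_piobuf_size stays at 256 bytes.
+ */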
+
+static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
+					 struct efx_tx_buffer *buffer)
+{
+	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
+	struct efx_buffer *page_buf =
+		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
+	unsigned int offset =
+		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
+
+	if (unlikely(!page_buf->addr) &&
+	    efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+				 GFP_ATOMIC))
+		return NULL;
+	buffer->dma_addr = page_buf->dma_addr + offset;
+	buffer->unmap_len = 0;
+	return (u8 *)page_buf->addr + offset;
+}
+
+u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
+				   struct efx_tx_buffer *buffer, size_t len)
+{
+	if (len > EFX_TX_CB_SIZE)
+		return NULL;
+	return efx_tx_get_copy_buffer(tx_queue, buffer);
+}
+
+static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
+{
+	/* We need to consider all queues that the net core sees as one */
+	struct efx_nic *efx = txq1->efx;
+	struct efx_tx_queue *txq2;
+	unsigned int fill_level;
+
+	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+	if (likely(fill_level < efx->txq_stop_thresh))
+		return;
+
+	/* We used the stale old_read_count above, which gives us a
+	 * pessimistic estimate of the fill level (which may even
+	 * validly be >= efx->txq_entries).  Now try again using
+	 * read_count (more likely to be a cache miss).
+	 *
+	 * If we read read_count and then conditionally stop the
+	 * queue, it is possible for the completion path to race with
+	 * us and complete all outstanding descriptors in the middle,
+	 * after which there will be no more completions to wake it.
+	 * Therefore we stop the queue first, then read read_count
+	 * (with a memory barrier to ensure the ordering), then
+	 * restart the queue if the fill level turns out to be low
+	 * enough.
+	 */
+	netif_tx_stop_queue(txq1->core_txq);
+	smp_mb();
+	efx_for_each_channel_tx_queue(txq2, txq1->channel)
+		txq2->old_read_count = READ_ONCE(txq2->read_count);
+
+	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
+	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
+	if (likely(fill_level < efx->txq_stop_thresh)) {
+		smp_mb();
+		if (likely(!efx->loopback_selftest))
+			netif_tx_start_queue(txq1->core_txq);
+	}
+}
+
+static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
+				struct sk_buff *skb)
+{
+	unsigned int copy_len = skb->len;
+	struct efx_tx_buffer *buffer;
+	u8 *copy_buffer;
+	int rc;
+
+	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
+
+	buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
+	if (unlikely(!copy_buffer))
+		return -ENOMEM;
+
+	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
+	EFX_WARN_ON_PARANOID(rc);
+	buffer->len = copy_len;
+
+	buffer->skb = skb;
+	buffer->flags = EFX_TX_BUF_SKB;
+
+	++tx_queue->insert_count;
+	return rc;
+}
+
+#ifdef EFX_USE_PIO
+
+struct efx_short_copy_buffer {
+	int used;
+	u8 buf[L1_CACHE_BYTES];
+};
+
+/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
+				    u8 *data, int len,
+				    struct efx_short_copy_buffer *copy_buf)
+{
+	int block_len = len & ~(sizeof(copy_buf->buf) - 1);
+
+	__iowrite64_copy(*piobuf, data, block_len >> 3);
+	*piobuf += block_len;
+	len -= block_len;
+
+	if (len) {
+		data += block_len;
+		BUG_ON(copy_buf->used);
+		BUG_ON(len > sizeof(copy_buf->buf));
+		memcpy(copy_buf->buf, data, len);
+		copy_buf->used = len;
+	}
+}
+
+/* Copy to PIO, respecting dword alignment, popping data from copy buffer first.
+ * Advances piobuf pointer. Leaves additional data in the copy buffer.
+ */
+static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
+				       u8 *data, int len,
+				       struct efx_short_copy_buffer *copy_buf)
+{
+	if (copy_buf->used) {
+		/* if the copy buffer is partially full, fill it up and write */
+		int copy_to_buf =
+			min_t(int, sizeof(copy_buf->buf) - copy_buf->used, len);
+
+		memcpy(copy_buf->buf + copy_buf->used, data, copy_to_buf);
+		copy_buf->used += copy_to_buf;
+
+		/* if we didn't fill it up then we're done for now */
+		if (copy_buf->used < sizeof(copy_buf->buf))
+			return;
+
+		__iowrite64_copy(*piobuf, copy_buf->buf,
+				 sizeof(copy_buf->buf) >> 3);
+		*piobuf += sizeof(copy_buf->buf);
+		data += copy_to_buf;
+		len -= copy_to_buf;
+		copy_buf->used = 0;
+	}
+
+	efx_memcpy_toio_aligned(efx, piobuf, data, len, copy_buf);
+}
+
+static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
+				  struct efx_short_copy_buffer *copy_buf)
+{
+	/* if there's anything in it, write the whole buffer, including junk */
+	if (copy_buf->used)
+		__iowrite64_copy(piobuf, copy_buf->buf,
+				 sizeof(copy_buf->buf) >> 3);
+}
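+
+/* The PIO helpers above pass lengths to __iowrite64_copy() shifted right by
+ * 3 because that API counts 8-byte words, not bytes; block_len and the copy
+ * buffer size are both multiples of 8, so nothing is lost by the shift.
+ */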
+
+/* Traverse skb structure and copy fragments in to PIO buffer.
+ * Advances piobuf pointer.
+ */
+static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
+				     u8 __iomem **piobuf,
+				     struct efx_short_copy_buffer *copy_buf)
+{
+	int i;
+
+	efx_memcpy_toio_aligned(efx, piobuf, skb->data, skb_headlen(skb),
+				copy_buf);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+		u8 *vaddr;
+
+		vaddr = kmap_atomic(skb_frag_page(f));
+
+		efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
+					   skb_frag_size(f), copy_buf);
+		kunmap_atomic(vaddr);
+	}
+
+	EFX_WARN_ON_ONCE_PARANOID(skb_shinfo(skb)->frag_list);
+}
+
+static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
+			       struct sk_buff *skb)
+{
+	struct efx_tx_buffer *buffer =
+		efx_tx_queue_get_insert_buffer(tx_queue);
+	u8 __iomem *piobuf = tx_queue->piobuf;
+
+	/* Copy to PIO buffer. Ensure the writes are padded to the end
+	 * of a cache line, as this is required for write-combining to be
+	 * effective on at least x86.
+	 */
+
+	if (skb_shinfo(skb)->nr_frags) {
+		/* The size of the copy buffer will ensure all writes
+		 * are the size of a cache line.
+		 */
+		struct efx_short_copy_buffer copy_buf;
+
+		copy_buf.used = 0;
+
+		efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
+					 &piobuf, &copy_buf);
+		efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
+	} else {
+		/* Pad the write to the size of a cache line.
+		 * We can do this because we know the skb_shared_info struct is
+		 * after the source, and the destination buffer is big enough.
+		 */
+		BUILD_BUG_ON(L1_CACHE_BYTES >
+			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
+		__iowrite64_copy(tx_queue->piobuf, skb->data,
+				 ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
+	}
+
+	buffer->skb = skb;
+	buffer->flags = EFX_TX_BUF_SKB | EFX_TX_BUF_OPTION;
+
+	EFX_POPULATE_QWORD_5(buffer->option,
+			     ESF_DZ_TX_DESC_IS_OPT, 1,
+			     ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_PIO,
+			     ESF_DZ_TX_PIO_CONT, 0,
+			     ESF_DZ_TX_PIO_BYTE_CNT, skb->len,
+			     ESF_DZ_TX_PIO_BUF_ADDR,
+			     tx_queue->piobuf_offset);
+	++tx_queue->insert_count;
+	return 0;
+}
+
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device.  This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty, including all queues for the channel.  This also ensures that
+ * only one packet at a time can be using the PIO buffer. If the xmit_more
+ * flag is set then we don't use this - there'll be another packet along
+ * shortly and we want to hold off the doorbell.
+ */
+static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
+{
+	struct efx_channel *channel = tx_queue->channel;
+
+	if (!tx_queue->piobuf)
+		return false;
+
+	EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
+
+	efx_for_each_channel_tx_queue(tx_queue, channel)
+		if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
+			return false;
+
+	return true;
+}
+#endif /* EFX_USE_PIO */
+
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+	struct efx_tx_queue *q;
+
+	efx_for_each_channel_tx_queue(q, channel) {
+		if (q->xmit_pending)
+			efx_nic_push_buffers(q);
+	}
+}
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue.  The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped and
+ * the queue's insert pointer will be restored to its original value.
+ *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.
+ *
+ * Returns NETDEV_TX_OK.
+ * You must hold netif_tx_lock() to call this function.
+ */
+netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	unsigned int old_insert_count = tx_queue->insert_count;
+	bool xmit_more = netdev_xmit_more();
+	bool data_mapped = false;
+	unsigned int segments;
+	unsigned int skb_len;
+	int rc;
+
+	skb_len = skb->len;
+	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
+	if (segments == 1)
+		segments = 0; /* Don't use TSO for a single segment. */
+
+	/* Handle TSO first - it's *possible* (although unlikely) that we might
+	 * be passed a packet to segment that's smaller than the copybreak/PIO
+	 * size limit.
+	 */
+	if (segments) {
+		switch (tx_queue->tso_version) {
+		case 1:
+			rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
+			break;
+		case 2:
+			rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
+			break;
+		case 0: /* No TSO on this queue, SW fallback needed */
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		if (rc == -EINVAL) {
+			rc = efx_tx_tso_fallback(tx_queue, skb);
+			tx_queue->tso_fallbacks++;
+			if (rc == 0)
+				return 0;
+		}
+		if (rc)
+			goto err;
+#ifdef EFX_USE_PIO
+	} else if (skb_len <= efx_piobuf_size && !xmit_more &&
+		   efx_tx_may_pio(tx_queue)) {
+		/* Use PIO for short packets with an empty queue. */
+		if (efx_enqueue_skb_pio(tx_queue, skb))
+			goto err;
+		tx_queue->pio_packets++;
+		data_mapped = true;
+#endif
+	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
+		/* Pad short packets or coalesce short fragmented packets. */
+		if (efx_enqueue_skb_copy(tx_queue, skb))
+			goto err;
+		tx_queue->cb_packets++;
+		data_mapped = true;
+	}
+
+	/* Map for DMA and create descriptors if we haven't done so already. */
+	if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
+		goto err;
+
+	efx_tx_maybe_stop_queue(tx_queue);
+
+	tx_queue->xmit_pending = true;
+
+	/* Pass off to hardware */
+	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+		efx_tx_send_pending(tx_queue->channel);
+
+	if (segments) {
+		tx_queue->tso_bursts++;
+		tx_queue->tso_packets += segments;
+		tx_queue->tx_packets  += segments;
+	} else {
+		tx_queue->tx_packets++;
+	}
+
+	return NETDEV_TX_OK;
+
+
+err:
+	efx_enqueue_unwind(tx_queue, old_insert_count);
+	dev_kfree_skb_any(skb);
+
+	/* If we're not expecting another transmit and we had something to push
+	 * on this queue or a partner queue then we need to push here to get the
+	 * previous packets out.
+	 */
+	if (!xmit_more)
+		efx_tx_send_pending(tx_queue->channel);
+
+	return NETDEV_TX_OK;
+}
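+
+/* Fast-path selection order in __efx_enqueue_skb(): TSO first (falling back
+ * to software GSO via efx_tx_tso_fallback() if the queue cannot segment the
+ * skb), then PIO for short packets on an empty queue, then the copy buffer
+ * for short fragmented skbs, and finally plain DMA mapping of the fragments.
+ */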
+
+/* Transmit a packet from an XDP buffer
+ *
+ * Returns number of packets sent on success, error code otherwise.
+ * Runs in NAPI context, either in our own NAPI poll (for XDP TX) or in
+ * another device's NAPI poll (for XDP redirect).
+ */
+int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
+		       bool flush)
+{
+	struct efx_tx_buffer *tx_buffer;
+	struct efx_tx_queue *tx_queue;
+	struct xdp_frame *xdpf;
+	dma_addr_t dma_addr;
+	unsigned int len;
+	int space;
+	int cpu;
+	int i = 0;
+
+	if (unlikely(n && !xdpfs))
+		return -EINVAL;
+	if (unlikely(!n))
+		return 0;
+
+	cpu = raw_smp_processor_id();
+	if (unlikely(cpu >= efx->xdp_tx_queue_count))
+		return -EINVAL;
+
+	tx_queue = efx->xdp_tx_queues[cpu];
+	if (unlikely(!tx_queue))
+		return -EINVAL;
+
+	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
+
+	/* If we're borrowing net stack queues we have to handle stop-restart
+	 * or we might block the queue and it will be considered as frozen
+	 */
+	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
+		if (netif_tx_queue_stopped(tx_queue->core_txq))
+			goto unlock;
+		efx_tx_maybe_stop_queue(tx_queue);
+	}
+
+	/* Check for available space. We should never need multiple
+	 * descriptors per frame.
+	 */
+	space = efx->txq_entries +
+		tx_queue->read_count - tx_queue->insert_count;
+
+	for (i = 0; i < n; i++) {
+		xdpf = xdpfs[i];
+
+		if (i >= space)
+			break;
+
+		/* We'll want a descriptor for this tx. */
+		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
+
+		len = xdpf->len;
+
+		/* Map for DMA. */
+		dma_addr = dma_map_single(&efx->pci_dev->dev,
+					  xdpf->data, len,
+					  DMA_TO_DEVICE);
+		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
+			break;
+
+		/*  Create descriptor and set up for unmapping DMA. */
+		tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+		tx_buffer->xdpf = xdpf;
+		tx_buffer->flags = EFX_TX_BUF_XDP |
+				   EFX_TX_BUF_MAP_SINGLE;
+		tx_buffer->dma_offset = 0;
+		tx_buffer->unmap_len = len;
+		tx_queue->tx_packets++;
+	}
+
+	/* Pass mapped frames to hardware. */
+	if (flush && i > 0)
+		efx_nic_push_buffers(tx_queue);
+
+unlock:
+	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
+		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
+
+	return i == 0 ? -EIO : i;
+}
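+
+/* efx_xdp_tx_buffers() assumes one descriptor per XDP frame, which is why a
+ * single up-front space check against the ring suffices; frames that do not
+ * fit (i >= space) or fail DMA mapping simply stop the loop and are excluded
+ * from the returned count.
+ */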
+
+/* Initiate a packet transmission.  We use one channel per CPU
+ * (sharing when we have more CPUs than channels).
+ *
+ * Context: non-blocking.
+ * Should always return NETDEV_TX_OK and consume the skb.
+ */
+netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
+				struct net_device *net_dev)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct efx_tx_queue *tx_queue;
+	unsigned index, type;
+
+	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+
+	index = skb_get_queue_mapping(skb);
+	type = efx_tx_csum_type_skb(skb);
+	if (index >= efx->n_tx_channels) {
+		index -= efx->n_tx_channels;
+		type |= EFX_TXQ_TYPE_HIGHPRI;
+	}
+
+	/* PTP "event" packet */
+	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
+	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+		/* There may be existing transmits on the channel that are
+		 * waiting for this packet to trigger the doorbell write.
+		 * We need to send the packets at this point.
+		 */
+		efx_tx_send_pending(efx_get_tx_channel(efx, index));
+		return efx_ptp_tx(efx, skb);
+	}
+
+	tx_queue = efx_get_tx_queue(efx, index, type);
+	if (WARN_ON_ONCE(!tx_queue)) {
+		/* We don't have a TXQ of the right type.
+		 * This should never happen, as we don't advertise offload
+		 * features unless we can support them.
+		 */
+		dev_kfree_skb_any(skb);
+		/* If we're not expecting another transmit and we had something to push
+		 * on this queue or a partner queue then we need to push here to get the
+		 * previous packets out.
+		 */
+		if (!netdev_xmit_more())
+			efx_tx_send_pending(efx_get_tx_channel(efx, index));
+		return NETDEV_TX_OK;
+	}
+
+	return __efx_enqueue_skb(tx_queue, skb);
+}
+
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+	unsigned int pkts_compl = 0, bytes_compl = 0;
+	unsigned int read_ptr;
+	bool finished = false;
+
+	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+	while (!finished) {
+		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+		if (!efx_tx_buffer_in_use(buffer)) {
+			struct efx_nic *efx = tx_queue->efx;
+
+			netif_err(efx, hw, efx->net_dev,
+				  "TX queue %d spurious single TX completion\n",
+				  tx_queue->queue);
+			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+			return;
+		}
+
+		/* Need to check the flag before dequeueing. */
+		if (buffer->flags & EFX_TX_BUF_SKB)
+			finished = true;
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+		++tx_queue->read_count;
+		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+	}
+
+	tx_queue->pkts_compl += pkts_compl;
+	tx_queue->bytes_compl += bytes_compl;
+
+	EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+	efx_xmit_done_check_empty(tx_queue);
+}
+
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	/* Must be inverse of queue lookup in efx_hard_start_xmit() */
+	tx_queue->core_txq =
+		netdev_get_tx_queue(efx->net_dev,
+				    tx_queue->channel->channel +
+				    ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
+				     efx->n_tx_channels : 0));
+}
+
+int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
+		 void *type_data)
+{
+	struct efx_nic *efx = netdev_priv(net_dev);
+	struct tc_mqprio_qopt *mqprio = type_data;
+	unsigned tc, num_tc;
+
+	if (type != TC_SETUP_QDISC_MQPRIO)
+		return -EOPNOTSUPP;
+
+	/* Only Siena supported highpri queues */
+	if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
+		return -EOPNOTSUPP;
+
+	num_tc = mqprio->num_tc;
+
+	if (num_tc > EFX_MAX_TX_TC)
+		return -EINVAL;
+
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
+	if (num_tc == net_dev->num_tc)
+		return 0;
+
+	for (tc = 0; tc < num_tc; tc++) {
+		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+	}
+
+	net_dev->num_tc = num_tc;
+
+	return netif_set_real_num_tx_queues(net_dev,
+					    max_t(int, num_tc, 1) *
+					    efx->n_tx_channels);
+}
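+
+/* The tc_to_txq mapping set up in efx_setup_tc() puts traffic class c on core
+ * queues [c * n_tx_channels, (c + 1) * n_tx_channels), which is the inverse
+ * of the index/HIGHPRI decoding done in efx_hard_start_xmit() and
+ * efx_init_tx_queue_core_txq().
+ */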
diff --git a/drivers/net/ethernet/sfc/siena/tx.h b/drivers/net/ethernet/sfc/siena/tx.h
new file mode 100644
index 000000000000..f2c4d2f89919
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2015 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_TX_H
+#define EFX_TX_H
+
+#include <linux/types.h>
+
+/* Driver internal tx-path related declarations. */
+
+unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
+			      dma_addr_t dma_addr, unsigned int len);
+
+u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
+				   struct efx_tx_buffer *buffer, size_t len);
+
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0; /* no checksum offload */
+
+	if (skb->encapsulation &&
+	    skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+		/* we only advertise features for IPv4 and IPv6 checksums on
+		 * encapsulated packets, so if the checksum is for the inner
+		 * packet, it must be one of them; no further checking required.
+		 */
+
+		/* Do we also need to offload the outer header checksum? */
+		if (skb_shinfo(skb)->gso_segs > 1 &&
+		    !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+			return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+		return EFX_TXQ_TYPE_INNER_CSUM;
+	}
+
+	/* similarly, we only advertise features for IPv4 and IPv6 checksums,
+	 * so it must be one of them. No need for further checks.
+	 */
+	return EFX_TXQ_TYPE_OUTER_CSUM;
+}
+#endif /* EFX_TX_H */
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.c b/drivers/net/ethernet/sfc/siena/tx_common.c
new file mode 100644
index 000000000000..d530cde2b864
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx_common.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "efx.h"
+#include "nic_common.h"
+#include "tx_common.h"
+
+static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
+{
+	return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
+			    PAGE_SIZE >> EFX_TX_CB_ORDER);
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	unsigned int entries;
+	int rc;
+
+	/* Size the ring to the smallest power of two that can hold txq_entries */
+	entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
+	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
+	tx_queue->ptr_mask = entries - 1;
+
+	netif_dbg(efx, probe, efx->net_dev,
+		  "creating TX queue %d size %#x mask %#x\n",
+		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
+
+	/* Allocate software ring */
+	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
+				   GFP_KERNEL);
+	if (!tx_queue->buffer)
+		return -ENOMEM;
+
+	tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
+				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
+	if (!tx_queue->cb_page) {
+		rc = -ENOMEM;
+		goto fail1;
+	}
+
+	/* Allocate hardware ring, determine TXQ type */
+	rc = efx_nic_probe_tx(tx_queue);
+	if (rc)
+		goto fail2;
+
+	tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
+	return 0;
+
+fail2:
+	kfree(tx_queue->cb_page);
+	tx_queue->cb_page = NULL;
+fail1:
+	kfree(tx_queue->buffer);
+	tx_queue->buffer = NULL;
+	return rc;
+}
+
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	struct efx_nic *efx = tx_queue->efx;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "initialising TX queue %d\n", tx_queue->queue);
+
+	tx_queue->insert_count = 0;
+	tx_queue->notify_count = 0;
+	tx_queue->write_count = 0;
+	tx_queue->packet_write_count = 0;
+	tx_queue->old_write_count = 0;
+	tx_queue->read_count = 0;
+	tx_queue->old_read_count = 0;
+	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
+	tx_queue->xmit_pending = false;
+	tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
+				  tx_queue->channel == efx_ptp_channel(efx));
+	tx_queue->completed_timestamp_major = 0;
+	tx_queue->completed_timestamp_minor = 0;
+
+	tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
+	tx_queue->tso_version = 0;
+
+	/* Set up TX descriptor ring */
+	efx_nic_init_tx(tx_queue);
+
+	tx_queue->initialised = true;
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	struct efx_tx_buffer *buffer;
+
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "shutting down TX queue %d\n", tx_queue->queue);
+
+	if (!tx_queue->buffer)
+		return;
+
+	/* Free any buffers left in the ring */
+	while (tx_queue->read_count != tx_queue->write_count) {
+		unsigned int pkts_compl = 0, bytes_compl = 0;
+
+		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+		++tx_queue->read_count;
+	}
+	tx_queue->xmit_pending = false;
+	netdev_tx_reset_queue(tx_queue->core_txq);
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+	int i;
+
+	if (!tx_queue->buffer)
+		return;
+
+	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+		  "destroying TX queue %d\n", tx_queue->queue);
+	efx_nic_remove_tx(tx_queue);
+
+	if (tx_queue->cb_page) {
+		for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
+			efx_nic_free_buffer(tx_queue->efx,
+					    &tx_queue->cb_page[i]);
+		kfree(tx_queue->cb_page);
+		tx_queue->cb_page = NULL;
+	}
+
+	kfree(tx_queue->buffer);
+	tx_queue->buffer = NULL;
+	tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
+}
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+			struct efx_tx_buffer *buffer,
+			unsigned int *pkts_compl,
+			unsigned int *bytes_compl)
+{
+	if (buffer->unmap_len) {
+		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
+		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
+
+		if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
+			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
+				       DMA_TO_DEVICE);
+		buffer->unmap_len = 0;
+	}
+
+	if (buffer->flags & EFX_TX_BUF_SKB) {
+		struct sk_buff *skb = (struct sk_buff *)buffer->skb;
+
+		EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
+		(*pkts_compl)++;
+		(*bytes_compl) += skb->len;
+		if (tx_queue->timestamping &&
+		    (tx_queue->completed_timestamp_major ||
+		     tx_queue->completed_timestamp_minor)) {
+			struct skb_shared_hwtstamps hwtstamp;
+
+			hwtstamp.hwtstamp =
+				efx_ptp_nic_to_kernel_time(tx_queue);
+			skb_tstamp_tx(skb, &hwtstamp);
+
+			tx_queue->completed_timestamp_major = 0;
+			tx_queue->completed_timestamp_minor = 0;
+		}
+		dev_consume_skb_any((struct sk_buff *)buffer->skb);
+		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
+			   "TX queue %d transmission id %x complete\n",
+			   tx_queue->queue, tx_queue->read_count);
+	} else if (buffer->flags & EFX_TX_BUF_XDP) {
+		xdp_return_frame_rx_napi(buffer->xdpf);
+	}
+
+	buffer->len = 0;
+	buffer->flags = 0;
+}
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+				unsigned int index,
+				unsigned int *pkts_compl,
+				unsigned int *bytes_compl)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	unsigned int stop_index, read_ptr;
+
+	stop_index = (index + 1) & tx_queue->ptr_mask;
+	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+	while (read_ptr != stop_index) {
+		struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+		if (!efx_tx_buffer_in_use(buffer)) {
+			netif_err(efx, tx_err, efx->net_dev,
+				  "TX queue %d spurious TX completion id %d\n",
+				  tx_queue->queue, read_ptr);
+			efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+			return;
+		}
+
+		efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
+
+		++tx_queue->read_count;
+		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+	}
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+		if (tx_queue->read_count == tx_queue->old_write_count) {
+			/* Ensure that read_count is flushed. */
+			smp_mb();
+			tx_queue->empty_read_count =
+				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+		}
+	}
+}
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+	unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
+	struct efx_nic *efx = tx_queue->efx;
+
+	EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
+
+	efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+	tx_queue->pkts_compl += pkts_compl;
+	tx_queue->bytes_compl += bytes_compl;
+
+	if (pkts_compl > 1)
+		++tx_queue->merge_events;
+
+	/* See if we need to restart the netif queue.  This memory
+	 * barrier ensures that we write read_count (inside
+	 * efx_dequeue_buffers()) before reading the queue status.
+	 */
+	smp_mb();
+	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
+	    likely(efx->port_enabled) &&
+	    likely(netif_device_present(efx->net_dev))) {
+		fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+		if (fill_level <= efx->txq_wake_thresh)
+			netif_tx_wake_queue(tx_queue->core_txq);
+	}
+
+	efx_xmit_done_check_empty(tx_queue);
+}
+
+/* Remove buffers put into a tx_queue for the current packet.
+ * The buffers must not have an skb attached.
+ */
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+			unsigned int insert_count)
+{
+	struct efx_tx_buffer *buffer;
+	unsigned int bytes_compl = 0;
+	unsigned int pkts_compl = 0;
+
+	/* Work backwards until we hit the original insert pointer value */
+	while (tx_queue->insert_count != insert_count) {
+		--tx_queue->insert_count;
+		buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
+		efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+	}
+}
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+				       dma_addr_t dma_addr, size_t len)
+{
+	const struct efx_nic_type *nic_type = tx_queue->efx->type;
+	struct efx_tx_buffer *buffer;
+	unsigned int dma_len;
+
+	/* Map the fragment taking account of NIC-dependent DMA limits. */
+	do {
+		buffer = efx_tx_queue_get_insert_buffer(tx_queue);
+
+		if (nic_type->tx_limit_len)
+			dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
+		else
+			dma_len = len;
+
+		buffer->len = dma_len;
+		buffer->dma_addr = dma_addr;
+		buffer->flags = EFX_TX_BUF_CONT;
+		len -= dma_len;
+		dma_addr += dma_len;
+		++tx_queue->insert_count;
+	} while (len);
+
+	return buffer;
+}
+
+int efx_tx_tso_header_length(struct sk_buff *skb)
+{
+	size_t header_len;
+
+	if (skb->encapsulation)
+		header_len = skb_inner_transport_header(skb) -
+				skb->data +
+				(inner_tcp_hdr(skb)->doff << 2u);
+	else
+		header_len = skb_transport_header(skb) - skb->data +
+				(tcp_hdr(skb)->doff << 2u);
+	return header_len;
+}
+
+/* Map all data from an SKB for DMA and create descriptors on the queue. */
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+		    unsigned int segment_count)
+{
+	struct efx_nic *efx = tx_queue->efx;
+	struct device *dma_dev = &efx->pci_dev->dev;
+	unsigned int frag_index, nr_frags;
+	dma_addr_t dma_addr, unmap_addr;
+	unsigned short dma_flags;
+	size_t len, unmap_len;
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	frag_index = 0;
+
+	/* Map header data. */
+	len = skb_headlen(skb);
+	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
+	dma_flags = EFX_TX_BUF_MAP_SINGLE;
+	unmap_len = len;
+	unmap_addr = dma_addr;
+
+	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+		return -EIO;
+
+	if (segment_count) {
+		/* For TSO we need to put the header into a separate
+		 * descriptor. Map this separately if necessary.
+		 */
+		size_t header_len = efx_tx_tso_header_length(skb);
+
+		if (header_len != len) {
+			tx_queue->tso_long_headers++;
+			efx_tx_map_chunk(tx_queue, dma_addr, header_len);
+			len -= header_len;
+			dma_addr += header_len;
+		}
+	}
+
+	/* Add descriptors for each fragment. */
+	do {
+		struct efx_tx_buffer *buffer;
+		skb_frag_t *fragment;
+
+		buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
+
+		/* The final descriptor for a fragment is responsible for
+		 * unmapping the whole fragment.
+		 */
+		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
+		buffer->unmap_len = unmap_len;
+		buffer->dma_offset = buffer->dma_addr - unmap_addr;
+
+		if (frag_index >= nr_frags) {
+			/* Store SKB details with the final buffer for
+			 * the completion.
+			 */
+			buffer->skb = skb;
+			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
+			return 0;
+		}
+
+		/* Move on to the next fragment. */
+		fragment = &skb_shinfo(skb)->frags[frag_index++];
+		len = skb_frag_size(fragment);
+		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
+					    DMA_TO_DEVICE);
+		dma_flags = 0;
+		unmap_len = len;
+		unmap_addr = dma_addr;
+
+		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
+			return -EIO;
+	} while (1);
+}
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
+{
+	/* Header and payload descriptor for each output segment, plus
+	 * one for every input fragment boundary within a segment
+	 */
+	unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
+
+	/* Possibly one more per segment for option descriptors */
+	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+		max_descs += EFX_TSO_MAX_SEGS;
+
+	/* Possibly more for PCIe page boundaries within input fragments */
+	if (PAGE_SIZE > EFX_PAGE_SIZE)
+		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
+				   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
+
+	return max_descs;
+}
+
+/*
+ * Fallback to software TSO.
+ *
+ * This is used if we are unable to send a GSO packet through hardware TSO.
+ * This should only ever happen due to per-queue restrictions - unsupported
+ * packets should first be filtered by the feature flags.
+ *
+ * Returns 0 on success, error code otherwise.
+ */
+int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+	struct sk_buff *segments, *next;
+
+	segments = skb_gso_segment(skb, 0);
+	if (IS_ERR(segments))
+		return PTR_ERR(segments);
+
+	dev_consume_skb_any(skb);
+
+	skb_list_walk_safe(segments, skb, next) {
+		skb_mark_not_on_list(skb);
+		efx_enqueue_skb(tx_queue, skb);
+	}
+
+	return 0;
+}
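
A side note on the ring arithmetic used throughout tx_common.c above: because efx_probe_tx_queue() rounds the ring up to a power of two, read_count/insert_count/write_count can be kept as free-running counters, reduced to a slot with "& ptr_mask", and compared by plain subtraction even across wraparound. A standalone illustration follows; the numbers are arbitrary examples, not driver values.

#include <stdio.h>

int main(void)
{
	unsigned int entries = 512;		/* power of two, as enforced by roundup_pow_of_two() */
	unsigned int ptr_mask = entries - 1;
	unsigned int read_count = 1022;		/* free-running counters may exceed 'entries'... */
	unsigned int insert_count = 1030;	/* ...and are masked down to a ring slot when used */

	printf("read slot %u, insert slot %u, descriptors in flight %u\n",
	       read_count & ptr_mask,		/* 1022 & 511 = 510 */
	       insert_count & ptr_mask,		/* 1030 & 511 = 6 (wrapped) */
	       insert_count - read_count);	/* 8, still correct across the wrap */
	return 0;
}
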
diff --git a/drivers/net/ethernet/sfc/siena/tx_common.h b/drivers/net/ethernet/sfc/siena/tx_common.h
new file mode 100644
index 000000000000..bbab7f248250
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/tx_common.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2018 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_COMMON_H
+#define EFX_TX_COMMON_H
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+
+void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+			struct efx_tx_buffer *buffer,
+			unsigned int *pkts_compl,
+			unsigned int *bytes_compl);
+
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+	return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+
+void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
+			unsigned int insert_count);
+
+struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue,
+				       dma_addr_t dma_addr, size_t len);
+int efx_tx_tso_header_length(struct sk_buff *skb);
+int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+		    unsigned int segment_count);
+
+unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
+int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+
+extern bool efx_separate_tx_channels;
+#endif
diff --git a/drivers/net/ethernet/sfc/siena/vfdi.h b/drivers/net/ethernet/sfc/siena/vfdi.h
new file mode 100644
index 000000000000..480b872eb4d1
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/vfdi.h
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
+ */
+#ifndef _VFDI_H
+#define _VFDI_H
+
+/**
+ * DOC: Virtual Function Driver Interface
+ *
+ * This file contains software structures used to form a two way
+ * communication channel between the VF driver and the PF driver,
+ * named Virtual Function Driver Interface (VFDI).
+ *
+ * For the purposes of VFDI, a page is a memory region with size and
+ * alignment of 4K.  All addresses are DMA addresses to be used within
+ * the domain of the relevant VF.
+ *
+ * The only hardware-defined channels for a VF driver to communicate
+ * with the PF driver are the event mailboxes (%FR_CZ_USR_EV
+ * registers).  Writing to these registers generates an event with
+ * EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
+ * and USER_EV_REG_VALUE set to the value written.  The PF driver may
+ * direct or disable delivery of these events by setting
+ * %FR_CZ_USR_EV_CFG.
+ *
+ * The PF driver can send arbitrary events to arbitrary event queues.
+ * However, for consistency, VFDI events from the PF are defined to
+ * follow the same form and be sent to the first event queue assigned
+ * to the VF while that queue is enabled by the VF driver.
+ *
+ * The general form of the variable bits of VFDI events is:
+ *
+ *       0             16                       24   31
+ *      | DATA        | TYPE                   | SEQ   |
+ *
+ * SEQ is a sequence number which should be incremented by 1 (modulo
+ * 256) for each event.  The sequence numbers used in each direction
+ * are independent.
+ *
+ * The VF submits requests of type &struct vfdi_req by sending the
+ * address of the request (ADDR) in a series of 4 events:
+ *
+ *       0             16                       24   31
+ *      | ADDR[0:15]  | VFDI_EV_TYPE_REQ_WORD0 | SEQ   |
+ *      | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
+ *      | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
+ *      | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
+ *
+ * The address must be page-aligned.  After receiving such a valid
+ * series of events, the PF driver will attempt to read the request
+ * and write a response to the same address.  In case of an invalid
+ * sequence of events or a DMA error, there will be no response.
+ *
+ * The VF driver may request that the PF driver writes status
+ * information into its domain asynchronously.  After writing the
+ * status, the PF driver will send an event of the form:
+ *
+ *       0             16                       24   31
+ *      | reserved    | VFDI_EV_TYPE_STATUS    | SEQ   |
+ *
+ * In case the VF must be reset for any reason, the PF driver will
+ * send an event of the form:
+ *
+ *       0             16                       24   31
+ *      | reserved    | VFDI_EV_TYPE_RESET     | SEQ   |
+ *
+ * It is then the responsibility of the VF driver to request
+ * reinitialisation of its queues.
+ */
+#define VFDI_EV_SEQ_LBN 24
+#define VFDI_EV_SEQ_WIDTH 8
+#define VFDI_EV_TYPE_LBN 16
+#define VFDI_EV_TYPE_WIDTH 8
+#define VFDI_EV_TYPE_REQ_WORD0 0
+#define VFDI_EV_TYPE_REQ_WORD1 1
+#define VFDI_EV_TYPE_REQ_WORD2 2
+#define VFDI_EV_TYPE_REQ_WORD3 3
+#define VFDI_EV_TYPE_STATUS 4
+#define VFDI_EV_TYPE_RESET 5
+#define VFDI_EV_DATA_LBN 0
+#define VFDI_EV_DATA_WIDTH 16
+
+struct vfdi_endpoint {
+	u8 mac_addr[ETH_ALEN];
+	__be16 tci;
+};
+
+/**
+ * enum vfdi_op - VFDI operation enumeration
+ * @VFDI_OP_RESPONSE: Indicates a response to the request.
+ * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
+ * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
+ * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
+ * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
+ *	finalize the SRAM entries.
+ * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
+ * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
+ * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
+ *	from PF and write the initial status.
+ * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
+ *	updates from PF.
+ */
+enum vfdi_op {
+	VFDI_OP_RESPONSE = 0,
+	VFDI_OP_INIT_EVQ = 1,
+	VFDI_OP_INIT_RXQ = 2,
+	VFDI_OP_INIT_TXQ = 3,
+	VFDI_OP_FINI_ALL_QUEUES = 4,
+	VFDI_OP_INSERT_FILTER = 5,
+	VFDI_OP_REMOVE_ALL_FILTERS = 6,
+	VFDI_OP_SET_STATUS_PAGE = 7,
+	VFDI_OP_CLEAR_STATUS_PAGE = 8,
+	VFDI_OP_LIMIT,
+};
+
+/* Response codes for VFDI operations. Other values may be used in future. */
+#define VFDI_RC_SUCCESS		0
+#define VFDI_RC_ENOMEM		(-12)
+#define VFDI_RC_EINVAL		(-22)
+#define VFDI_RC_EOPNOTSUPP	(-95)
+#define VFDI_RC_ETIMEDOUT	(-110)
+
+/**
+ * struct vfdi_req - Request from VF driver to PF driver
+ * @op: Operation code or response indicator, taken from &enum vfdi_op.
+ * @rc: Response code.  Set to 0 on success or a negative error code on failure.
+ * @u.init_evq.index: Index of event queue to create.
+ * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
+ * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
+ *	address of each page backing the event queue.
+ * @u.init_rxq.index: Index of receive queue to create.
+ * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
+ * @u.init_rxq.evq: Instance of event queue to target receive events at.
+ * @u.init_rxq.label: Label used in receive events.
+ * @u.init_rxq.flags: Unused.
+ * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
+ *	address of each page backing the receive queue.
+ * @u.init_txq.index: Index of transmit queue to create.
+ * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
+ * @u.init_txq.evq: Instance of event queue to target transmit completion
+ *	events at.
+ * @u.init_txq.label: Label used in transmit completion events.
+ * @u.init_txq.flags: Checksum offload flags.
+ * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
+ *	address of each page backing the transmit queue.
+ * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
+ *	all traffic at this receive queue.
+ * @u.mac_filter.flags: MAC filter flags.
+ * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
+ *	This address must be page-aligned and the PF may write up to a
+ *	whole page (allowing for extension of the structure).
+ * @u.set_status_page.peer_page_count: Number of additional pages the VF
+ *	has provided into which peer addresses may be DMAd.
+ * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
+ *	If the number of peers exceeds 256, then the VF must provide
+ *	additional pages in this array. The PF will then DMA up to
+ *	512 vfdi_endpoint structures into each page.  These addresses
+ *	must be page-aligned.
+ */
+struct vfdi_req {
+	u32 op;
+	u32 reserved1;
+	s32 rc;
+	u32 reserved2;
+	union {
+		struct {
+			u32 index;
+			u32 buf_count;
+			u64 addr[];
+		} init_evq;
+		struct {
+			u32 index;
+			u32 buf_count;
+			u32 evq;
+			u32 label;
+			u32 flags;
+#define VFDI_RXQ_FLAG_SCATTER_EN 1
+			u32 reserved;
+			u64 addr[];
+		} init_rxq;
+		struct {
+			u32 index;
+			u32 buf_count;
+			u32 evq;
+			u32 label;
+			u32 flags;
+#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
+#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
+			u32 reserved;
+			u64 addr[];
+		} init_txq;
+		struct {
+			u32 rxq;
+			u32 flags;
+#define VFDI_MAC_FILTER_FLAG_RSS 1
+#define VFDI_MAC_FILTER_FLAG_SCATTER 2
+		} mac_filter;
+		struct {
+			u64 dma_addr;
+			u64 peer_page_count;
+			u64 peer_page_addr[];
+		} set_status_page;
+	} u;
+};
+
+/**
+ * struct vfdi_status - Status provided by PF driver to VF driver
+ * @generation_start: A generation count DMA'd to VF *before* the
+ *	rest of the structure.
+ * @generation_end: A generation count DMA'd to VF *after* the
+ *	rest of the structure.
+ * @version: Version of this structure; currently set to 1.  Later
+ *	versions must either be layout-compatible or only be sent to VFs
+ *	that specifically request them.
+ * @length: Total length of this structure including embedded tables
+ * @vi_scale: log2 the number of VIs available on this VF. This quantity
+ *	is used by the hardware for register decoding.
+ * @max_tx_channels: The maximum number of transmit queues the VF can use.
+ * @rss_rxq_count: The number of receive queues present in the shared RSS
+ *	indirection table.
+ * @peer_count: Total number of peers in the complete peer list. If larger
+ *	than ARRAY_SIZE(%peers), then the VF must provide sufficient
+ *	additional pages each of which is filled with vfdi_endpoint structures.
+ * @local: The MAC address and outer VLAN tag of *this* VF
+ * @peers: Table of peer addresses.  The @tci fields in these structures
+ *	are currently unused and must be ignored.  Additional peers are
+ *	written into any additional pages provided by the VF.
+ * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
+ *	for interrupt moderation timers, in nanoseconds. This member is only
+ *	present if @length is sufficiently large.
+ */
+struct vfdi_status {
+	u32 generation_start;
+	u32 generation_end;
+	u32 version;
+	u32 length;
+	u8 vi_scale;
+	u8 max_tx_channels;
+	u8 rss_rxq_count;
+	u8 reserved1;
+	u16 peer_count;
+	u16 reserved2;
+	struct vfdi_endpoint local;
+	struct vfdi_endpoint peers[256];
+
+	/* Members below here extend version 1 of this structure */
+	u32 timer_quantum_ns;
+};
+
+#endif
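
Not part of the patch: a standalone sketch of the request-event encoding documented in the VFDI DOC comment above. A 32-bit user event carries DATA in bits 0-15, TYPE in bits 16-23 and SEQ in bits 24-31 (per the VFDI_EV_* definitions), and the DMA address of a request is delivered as four such events, least-significant 16 bits first, with SEQ incremented by one per event. Everything prefixed "sketch_" is an illustrative name; the constants simply restate the header's values so the example compiles on its own.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_EV_DATA_LBN	 0	/* VFDI_EV_DATA_LBN */
#define SKETCH_EV_TYPE_LBN	16	/* VFDI_EV_TYPE_LBN */
#define SKETCH_EV_SEQ_LBN	24	/* VFDI_EV_SEQ_LBN */
#define SKETCH_EV_TYPE_REQ_WORD0 0	/* VFDI_EV_TYPE_REQ_WORD0 */

/* Pack one USR_EV value: DATA | TYPE | SEQ */
static uint32_t sketch_pack_ev(uint16_t data, uint8_t type, uint8_t seq)
{
	return ((uint32_t)data << SKETCH_EV_DATA_LBN) |
	       ((uint32_t)type << SKETCH_EV_TYPE_LBN) |
	       ((uint32_t)seq  << SKETCH_EV_SEQ_LBN);
}

int main(void)
{
	uint64_t req_addr = 0x123456789000ull;	/* page-aligned request address (example only) */
	uint8_t seq = 42;			/* arbitrary starting sequence number */
	int i;

	/* REQ_WORD0..REQ_WORD3 carry ADDR[0:15]..ADDR[48:63] with SEQ, SEQ+1, ... */
	for (i = 0; i < 4; i++)
		printf("event %d: %#010x\n", i,
		       sketch_pack_ev((uint16_t)(req_addr >> (16 * i)),
				      (uint8_t)(SKETCH_EV_TYPE_REQ_WORD0 + i),
				      (uint8_t)(seq + i)));
	return 0;
}

The vfdi_status kernel-doc above similarly implies a seqlock-style read on the VF side: since @generation_start is DMA'd before the body and @generation_end after it, one straightforward approach (illustrative, not necessarily what any driver does) is to read generation_end, copy the structure, then read generation_start and retry the copy whenever the two counts differ.
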
diff --git a/drivers/net/ethernet/sfc/siena/workarounds.h b/drivers/net/ethernet/sfc/siena/workarounds.h
new file mode 100644
index 000000000000..815be2d20c4b
--- /dev/null
+++ b/drivers/net/ethernet/sfc/siena/workarounds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
+#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
+#define EFX_WORKAROUND_10G(efx) 1
+
+/* Bit-bashed I2C reads cause performance drop */
+#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
+/* Legacy interrupt storm when interrupt fifo fills */
+#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
+
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx)					\
+	(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx)					\
+	(efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
+/* Moderation timer access must go through MCDI */
+#define EFX_EF10_WORKAROUND_61265(efx)					\
+	(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
+
+#endif /* EFX_WORKAROUNDS_H */
