Message-Id: <1484241755-17603-2-git-send-email-saeedm@mellanox.com>
Date:   Thu, 12 Jan 2017 19:22:35 +0200
From:   Saeed Mahameed <saeedm@...lanox.com>
To:     "David S. Miller" <davem@...emloft.net>,
        Doug Ledford <dledford@...hat.com>
Cc:     netdev@...r.kernel.org, linux-rdma@...r.kernel.org,
        Leon Romanovsky <leon@...nel.org>,
        Leon Romanovsky <leonro@...lanox.com>,
        Saeed Mahameed <saeedm@...lanox.com>
Subject: [for-next] {net, IB}/mlx5: Reorganize driver file layout

From: Leon Romanovsky <leonro@...lanox.com>

This patch reorganizes the mlx5 driver file layout to better reflect
mlx5 modularity and to allow future separation between the EN, IB and
shared code parts.

The new structure:
 * drivers/net/ethernet/mellanox/mlx5/*       - core HW/PCI driver logic
 * drivers/net/ethernet/mellanox/mlx5/en/*    - ethernet
 * drivers/net/ethernet/mellanox/mlx5/fs/*    - flow steering
 * drivers/net/ethernet/mellanox/mlx5/sriov/* - SR-IOV and E-Switch
 * drivers/net/ethernet/mellanox/mlx5/lib/*   - common mlx5 commands and API library

In future submissions, we will shrink the "lib" directory to only the
code shared by both subsystems: the IB part will be moved to
drivers/infiniband/hw/mlx5, and the EN part will be moved to the "en"
directory. Such separation will keep the library (shared) code lean and
minimal, and will help avoid future conflicts between IB and net
submissions.

The proposed structure also allows us to remove include/linux/mlx5,
which belongs solely to Mellanox devices and does not need to be
exposed in the common Linux include directory.
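
For in-tree consumers, the removal of include/linux/mlx5 boils down to
swapping the global includes for driver-local headers, resolved through
the mlx5 include path set up in the Makefiles. A representative
before/after, mirroring the ib_virt.c hunk below:

	/* before: header exposed to the whole kernel tree */
	#include <linux/mlx5/vport.h>

	/* after: header private to the mlx5 driver directory */
	#include "lib/vport.h"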

This change goes together with an update of the MAINTAINERS file,
introducing more granular maintainership roles:
 * drivers/net/ethernet/mellanox/mlx5/*       - Saeed, Matan and Leon
 * drivers/net/ethernet/mellanox/mlx5/en/*    - Saeed
 * drivers/net/ethernet/mellanox/mlx5/fs/*    - Saeed, Matan and Leon
 * drivers/net/ethernet/mellanox/mlx5/sriov/* - Saeed
 * drivers/net/ethernet/mellanox/mlx5/lib/*   - Saeed, Matan and Leon
 * include/uapi/rdma/mlx5-abi.h               - Matan and Leon

Signed-off-by: Leon Romanovsky <leonro@...lanox.com>
Signed-off-by: Saeed Mahameed <saeedm@...lanox.com>
---
 MAINTAINERS                                        |   11 +-
 drivers/infiniband/hw/mlx5/Makefile                |    1 +
 drivers/infiniband/hw/mlx5/ib_virt.c               |    2 +-
 drivers/infiniband/hw/mlx5/mad.c                   |    4 +-
 drivers/infiniband/hw/mlx5/main.c                  |    6 +-
 drivers/infiniband/hw/mlx5/mlx5_ib.h               |   10 +-
 drivers/infiniband/hw/mlx5/srq.c                   |    4 +-
 drivers/net/ethernet/mellanox/Kconfig              |    2 +-
 drivers/net/ethernet/mellanox/Makefile             |    2 +-
 drivers/net/ethernet/mellanox/mlx5/Kconfig         |   32 +
 drivers/net/ethernet/mellanox/mlx5/Makefile        |   21 +
 drivers/net/ethernet/mellanox/mlx5/cmd.c           | 1804 +++++
 drivers/net/ethernet/mellanox/mlx5/core/Kconfig    |   32 -
 drivers/net/ethernet/mellanox/mlx5/core/Makefile   |   13 -
 drivers/net/ethernet/mellanox/mlx5/core/alloc.c    |  298 -
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c      | 1805 -----
 drivers/net/ethernet/mellanox/mlx5/core/cq.c       |  290 -
 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c  |  614 --
 drivers/net/ethernet/mellanox/mlx5/core/dev.c      |  378 -
 drivers/net/ethernet/mellanox/mlx5/core/en.h       |  932 ---
 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c  |  749 --
 drivers/net/ethernet/mellanox/mlx5/core/en_clock.c |  297 -
 .../net/ethernet/mellanox/mlx5/core/en_common.c    |  159 -
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c |  751 --
 .../net/ethernet/mellanox/mlx5/core/en_ethtool.c   | 1608 ----
 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c    | 1143 ---
 .../ethernet/mellanox/mlx5/core/en_fs_ethtool.c    |  587 --
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c  | 4125 ----------
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c   |  604 --
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c    |  966 ---
 drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c |  335 -
 .../net/ethernet/mellanox/mlx5/core/en_selftest.c  |  352 -
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h |  430 -
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c    | 1094 ---
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h    |   54 -
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c    |  559 --
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c  |  232 -
 drivers/net/ethernet/mellanox/mlx5/core/eq.c       |  782 --
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  | 2133 -----
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  |  340 -
 .../ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1002 ---
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c   |  514 --
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h   |   92 -
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  | 2039 -----
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h  |  239 -
 .../net/ethernet/mellanox/mlx5/core/fs_counters.c  |  319 -
 drivers/net/ethernet/mellanox/mlx5/core/fw.c       |  176 -
 drivers/net/ethernet/mellanox/mlx5/core/health.c   |  386 -
 drivers/net/ethernet/mellanox/mlx5/core/lag.c      |  588 --
 drivers/net/ethernet/mellanox/mlx5/core/mad.c      |   75 -
 drivers/net/ethernet/mellanox/mlx5/core/main.c     | 1575 ----
 drivers/net/ethernet/mellanox/mlx5/core/mcg.c      |   66 -
 .../net/ethernet/mellanox/mlx5/core/mlx5_core.h    |  156 -
 drivers/net/ethernet/mellanox/mlx5/core/mr.c       |  210 -
 .../net/ethernet/mellanox/mlx5/core/pagealloc.c    |  580 --
 drivers/net/ethernet/mellanox/mlx5/core/pd.c       |   62 -
 drivers/net/ethernet/mellanox/mlx5/core/port.c     |  868 --
 drivers/net/ethernet/mellanox/mlx5/core/qp.c       |  541 --
 drivers/net/ethernet/mellanox/mlx5/core/rl.c       |  269 -
 drivers/net/ethernet/mellanox/mlx5/core/sriov.c    |  236 -
 drivers/net/ethernet/mellanox/mlx5/core/srq.c      |  562 --
 drivers/net/ethernet/mellanox/mlx5/core/transobj.c |  400 -
 drivers/net/ethernet/mellanox/mlx5/core/uar.c      |  330 -
 drivers/net/ethernet/mellanox/mlx5/core/vport.c    | 1025 ---
 drivers/net/ethernet/mellanox/mlx5/core/vxlan.c    |  182 -
 drivers/net/ethernet/mellanox/mlx5/core/vxlan.h    |   62 -
 drivers/net/ethernet/mellanox/mlx5/core/wq.c       |  196 -
 drivers/net/ethernet/mellanox/mlx5/core/wq.h       |  184 -
 drivers/net/ethernet/mellanox/mlx5/debugfs.c       |  614 ++
 drivers/net/ethernet/mellanox/mlx5/dev.c           |  378 +
 drivers/net/ethernet/mellanox/mlx5/en/arfs.c       |  749 ++
 drivers/net/ethernet/mellanox/mlx5/en/clock.c      |  297 +
 drivers/net/ethernet/mellanox/mlx5/en/common.c     |  159 +
 drivers/net/ethernet/mellanox/mlx5/en/dcbnl.c      |  751 ++
 drivers/net/ethernet/mellanox/mlx5/en/en.h         |  932 +++
 drivers/net/ethernet/mellanox/mlx5/en/ethtool.c    | 1608 ++++
 drivers/net/ethernet/mellanox/mlx5/en/fs.c         | 1143 +++
 drivers/net/ethernet/mellanox/mlx5/en/fs_ethtool.c |  587 ++
 drivers/net/ethernet/mellanox/mlx5/en/main.c       | 4125 ++++++++++
 drivers/net/ethernet/mellanox/mlx5/en/rep.c        |  604 ++
 drivers/net/ethernet/mellanox/mlx5/en/rx.c         |  966 +++
 drivers/net/ethernet/mellanox/mlx5/en/rx_am.c      |  335 +
 drivers/net/ethernet/mellanox/mlx5/en/selftest.c   |  352 +
 drivers/net/ethernet/mellanox/mlx5/en/stats.h      |  430 +
 drivers/net/ethernet/mellanox/mlx5/en/tc.c         | 1094 +++
 drivers/net/ethernet/mellanox/mlx5/en/tc.h         |   54 +
 drivers/net/ethernet/mellanox/mlx5/en/tx.c         |  559 ++
 drivers/net/ethernet/mellanox/mlx5/en/txrx.c       |  232 +
 drivers/net/ethernet/mellanox/mlx5/en/vxlan.c      |  182 +
 drivers/net/ethernet/mellanox/mlx5/en/vxlan.h      |   62 +
 drivers/net/ethernet/mellanox/mlx5/en/wq.c         |  196 +
 drivers/net/ethernet/mellanox/mlx5/en/wq.h         |  184 +
 drivers/net/ethernet/mellanox/mlx5/eq.c            |  782 ++
 drivers/net/ethernet/mellanox/mlx5/fs/cmd.c        |  513 ++
 drivers/net/ethernet/mellanox/mlx5/fs/cmd.h        |   92 +
 drivers/net/ethernet/mellanox/mlx5/fs/core.c       | 2038 +++++
 drivers/net/ethernet/mellanox/mlx5/fs/core.h       |  239 +
 drivers/net/ethernet/mellanox/mlx5/fs/counters.c   |  319 +
 drivers/net/ethernet/mellanox/mlx5/health.c        |  386 +
 drivers/net/ethernet/mellanox/mlx5/lag.c           |  588 ++
 drivers/net/ethernet/mellanox/mlx5/lib/alloc.c     |  297 +
 drivers/net/ethernet/mellanox/mlx5/lib/cmd.h       |   51 +
 drivers/net/ethernet/mellanox/mlx5/lib/cq.c        |  290 +
 drivers/net/ethernet/mellanox/mlx5/lib/cq.h        |  184 +
 drivers/net/ethernet/mellanox/mlx5/lib/device.h    | 1100 +++
 drivers/net/ethernet/mellanox/mlx5/lib/doorbell.h  |   81 +
 drivers/net/ethernet/mellanox/mlx5/lib/driver.h    | 1127 +++
 drivers/net/ethernet/mellanox/mlx5/lib/fs.h        |  160 +
 drivers/net/ethernet/mellanox/mlx5/lib/fw.c        |  176 +
 drivers/net/ethernet/mellanox/mlx5/lib/mad.c       |   75 +
 drivers/net/ethernet/mellanox/mlx5/lib/mcg.c       |   66 +
 drivers/net/ethernet/mellanox/mlx5/lib/mlx5_ifc.h  | 8287 ++++++++++++++++++++
 drivers/net/ethernet/mellanox/mlx5/lib/mr.c        |  210 +
 drivers/net/ethernet/mellanox/mlx5/lib/pd.c        |   62 +
 drivers/net/ethernet/mellanox/mlx5/lib/port.c      |  868 ++
 drivers/net/ethernet/mellanox/mlx5/lib/port.h      |  167 +
 drivers/net/ethernet/mellanox/mlx5/lib/qp.c        |  540 ++
 drivers/net/ethernet/mellanox/mlx5/lib/qp.h        |  598 ++
 drivers/net/ethernet/mellanox/mlx5/lib/rl.c        |  269 +
 drivers/net/ethernet/mellanox/mlx5/lib/srq.c       |  562 ++
 drivers/net/ethernet/mellanox/mlx5/lib/srq.h       |   66 +
 drivers/net/ethernet/mellanox/mlx5/lib/transobj.c  |  400 +
 drivers/net/ethernet/mellanox/mlx5/lib/transobj.h  |   78 +
 drivers/net/ethernet/mellanox/mlx5/lib/uar.c       |  330 +
 drivers/net/ethernet/mellanox/mlx5/lib/vport.c     | 1025 +++
 drivers/net/ethernet/mellanox/mlx5/lib/vport.h     |  117 +
 drivers/net/ethernet/mellanox/mlx5/main.c          | 1575 ++++
 drivers/net/ethernet/mellanox/mlx5/mlx5.h          |  156 +
 drivers/net/ethernet/mellanox/mlx5/pagealloc.c     |  580 ++
 drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.c | 2133 +++++
 drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.h |  340 +
 .../mellanox/mlx5/sriov/eswitch_offloads.c         | 1002 +++
 drivers/net/ethernet/mellanox/mlx5/sriov/sriov.c   |  236 +
 include/linux/mlx5/cmd.h                           |   51 -
 include/linux/mlx5/cq.h                            |  184 -
 include/linux/mlx5/device.h                        | 1100 ---
 include/linux/mlx5/doorbell.h                      |   81 -
 include/linux/mlx5/driver.h                        | 1127 ---
 include/linux/mlx5/fs.h                            |  160 -
 include/linux/mlx5/mlx5_ifc.h                      | 8287 --------------------
 include/linux/mlx5/port.h                          |  167 -
 include/linux/mlx5/qp.h                            |  598 --
 include/linux/mlx5/srq.h                           |   66 -
 include/linux/mlx5/transobj.h                      |   78 -
 include/linux/mlx5/vport.h                         |  117 -
 145 files changed, 46637 insertions(+), 46632 deletions(-)
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/Kconfig
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/Makefile
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/cmd.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Kconfig
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/Makefile
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/alloc.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cmd.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/cq.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/dev.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_common.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_main.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eq.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/fw.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/health.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/lag.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mad.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/main.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mcg.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/mr.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/pd.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/port.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/qp.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/rl.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/sriov.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/srq.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/transobj.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/uar.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vport.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/wq.c
 delete mode 100644 drivers/net/ethernet/mellanox/mlx5/core/wq.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/debugfs.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/dev.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/arfs.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/clock.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/common.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/dcbnl.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/en.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/ethtool.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/fs.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/fs_ethtool.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/main.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/rep.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/rx.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/rx_am.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/selftest.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/stats.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/tc.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/tc.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/tx.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/txrx.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/vxlan.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/vxlan.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/wq.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/en/wq.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/eq.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/fs/cmd.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/fs/cmd.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/fs/core.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/fs/core.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/fs/counters.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/health.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lag.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/alloc.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/cmd.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/cq.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/cq.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/device.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/doorbell.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/driver.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/fs.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/fw.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/mad.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/mcg.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/mlx5_ifc.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/mr.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/pd.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/port.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/port.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/qp.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/qp.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/rl.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/srq.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/srq.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/transobj.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/transobj.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/uar.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/vport.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/lib/vport.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/main.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/mlx5.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/pagealloc.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.h
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/sriov/eswitch_offloads.c
 create mode 100644 drivers/net/ethernet/mellanox/mlx5/sriov/sriov.c
 delete mode 100644 include/linux/mlx5/cmd.h
 delete mode 100644 include/linux/mlx5/cq.h
 delete mode 100644 include/linux/mlx5/device.h
 delete mode 100644 include/linux/mlx5/doorbell.h
 delete mode 100644 include/linux/mlx5/driver.h
 delete mode 100644 include/linux/mlx5/fs.h
 delete mode 100644 include/linux/mlx5/mlx5_ifc.h
 delete mode 100644 include/linux/mlx5/port.h
 delete mode 100644 include/linux/mlx5/qp.h
 delete mode 100644 include/linux/mlx5/srq.h
 delete mode 100644 include/linux/mlx5/transobj.h
 delete mode 100644 include/linux/mlx5/vport.h
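
Note: the stat above was generated without rename detection, so every
moved file appears as a create/delete pair. For reference, the same
commit can be viewed as renames by regenerating the diff with rename
detection enabled (illustrative commands, assuming the commit has been
applied to a local branch):

	# regenerate the patch with rename/copy detection, so moved
	# files collapse into rename entries with similarity scores
	git format-patch -M -C -1 HEAD

	# or inspect the applied commit as a rename summary
	git show -M --summary HEAD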

diff --git a/MAINTAINERS b/MAINTAINERS
index 11904a9..5c1f390 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8045,7 +8045,8 @@ L:	netdev@...r.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
-F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
+F:	drivers/net/ethernet/mellanox/mlx5/en/
+F:	drivers/net/ethernet/mellanox/mlx5/sriov/
 
 MELLANOX ETHERNET SWITCH DRIVERS
 M:	Jiri Pirko <jiri@...lanox.com>
@@ -8256,9 +8257,9 @@ L:	linux-rdma@...r.kernel.org
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
-F:	drivers/net/ethernet/mellanox/mlx5/core/
-F:	include/linux/mlx5/
-F:	include/uapi/rdma/mlx5-abi.h
+F:	drivers/net/ethernet/mellanox/mlx5/*
+F:	drivers/net/ethernet/mellanox/mlx5/lib/
+F:	drivers/net/ethernet/mellanox/mlx5/fs/
 
 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@...lanox.com>
@@ -8268,7 +8269,7 @@ W:	http://www.mellanox.com
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx5/
-F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:	Crt Mori <cmo@...exis.com>
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index 7493a83..6d8e7e0 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -1,3 +1,4 @@
+ccflags-y := -I$(srctree)/drivers/net/ethernet/mellanox/mlx5/
 obj-$(CONFIG_MLX5_INFINIBAND)	+= mlx5_ib.o
 
 mlx5_ib-y :=	main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o
diff --git a/drivers/infiniband/hw/mlx5/ib_virt.c b/drivers/infiniband/hw/mlx5/ib_virt.c
index c1b9de8..c1a749b 100644
--- a/drivers/infiniband/hw/mlx5/ib_virt.c
+++ b/drivers/infiniband/hw/mlx5/ib_virt.c
@@ -31,7 +31,7 @@
  */
 
 #include <linux/module.h>
-#include <linux/mlx5/vport.h>
+#include "lib/vport.h"
 #include "mlx5_ib.h"
 
 static inline u32 mlx_to_net_policy(enum port_state_policy mlx_policy)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 39e5848..72b081a 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -30,8 +30,8 @@
  * SOFTWARE.
  */
 
-#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/vport.h>
+#include "lib/cmd.h"
+#include "lib/vport.h"
 #include <rdma/ib_mad.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a191b93..0867462 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -45,14 +45,14 @@
 #include <rdma/ib_user_verbs.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_cache.h>
-#include <linux/mlx5/port.h>
-#include <linux/mlx5/vport.h>
+#include "lib/port.h"
+#include "lib/vport.h"
 #include <linux/list.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
 #include <linux/in.h>
 #include <linux/etherdevice.h>
-#include <linux/mlx5/fs.h>
+#include "lib/fs.h"
 #include "mlx5_ib.h"
 
 #define DRIVER_NAME "mlx5_ib"
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e1a4b93d..fe5c8da 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -37,12 +37,12 @@
 #include <linux/sched.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_smi.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cq.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
+#include "lib/driver.h"
+#include "lib/cq.h"
+#include "lib/qp.h"
+#include "lib/srq.h"
 #include <linux/types.h>
-#include <linux/mlx5/transobj.h>
+#include "lib/transobj.h"
 #include <rdma/ib_user_verbs.h>
 #include <rdma/mlx5-abi.h>
 
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6f4397e..949b8d8 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -31,8 +31,8 @@
  */
 
 #include <linux/module.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
+#include "lib/qp.h"
+#include "lib/srq.h"
 #include <linux/slab.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
diff --git a/drivers/net/ethernet/mellanox/Kconfig b/drivers/net/ethernet/mellanox/Kconfig
index d547010..214b378 100644
--- a/drivers/net/ethernet/mellanox/Kconfig
+++ b/drivers/net/ethernet/mellanox/Kconfig
@@ -17,7 +17,7 @@ config NET_VENDOR_MELLANOX
 if NET_VENDOR_MELLANOX
 
 source "drivers/net/ethernet/mellanox/mlx4/Kconfig"
-source "drivers/net/ethernet/mellanox/mlx5/core/Kconfig"
+source "drivers/net/ethernet/mellanox/mlx5/Kconfig"
 source "drivers/net/ethernet/mellanox/mlxsw/Kconfig"
 
 endif # NET_VENDOR_MELLANOX
diff --git a/drivers/net/ethernet/mellanox/Makefile b/drivers/net/ethernet/mellanox/Makefile
index 2e2a5ec..a65d592 100644
--- a/drivers/net/ethernet/mellanox/Makefile
+++ b/drivers/net/ethernet/mellanox/Makefile
@@ -3,5 +3,5 @@
 #
 
 obj-$(CONFIG_MLX4_CORE) += mlx4/
-obj-$(CONFIG_MLX5_CORE) += mlx5/core/
+obj-$(CONFIG_MLX5_CORE) += mlx5/
 obj-$(CONFIG_MLXSW_CORE) += mlxsw/
diff --git a/drivers/net/ethernet/mellanox/mlx5/Kconfig b/drivers/net/ethernet/mellanox/mlx5/Kconfig
new file mode 100644
index 0000000..ddb4ca4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/Kconfig
@@ -0,0 +1,32 @@
+#
+# Mellanox driver configuration
+#
+
+config MLX5_CORE
+	tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+	depends on MAY_USE_DEVLINK
+	depends on PCI
+	default n
+	---help---
+	  Core driver for low level functionality of the ConnectX-4 and
+	  Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+	bool "Mellanox Technologies ConnectX-4 Ethernet support"
+	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+	imply PTP_1588_CLOCK
+	default n
+	---help---
+	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+
+config MLX5_CORE_EN_DCB
+	bool "Data Center Bridging (DCB) Support"
+	default y
+	depends on MLX5_CORE_EN && DCB
+	---help---
+	  Say Y here if you want to use Data Center Bridging (DCB) in the
+	  driver.
+	  If set to N, you will not be able to configure QoS and rate-limit attributes.
+	  This flag depends on the kernel's DCB support.
+
+	  If unsure, set to Y.
diff --git a/drivers/net/ethernet/mellanox/mlx5/Makefile b/drivers/net/ethernet/mellanox/mlx5/Makefile
new file mode 100644
index 0000000..9634f09
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/Makefile
@@ -0,0 +1,21 @@
+subdir-ccflags-y := -I$(srctree)/drivers/net/ethernet/mellanox/mlx5/
+obj-$(CONFIG_MLX5_CORE)		+= mlx5_core.o
+
+mlx5_core-y := main.o pagealloc.o health.o \
+	       cmd.o debugfs.o eq.o lag.o dev.o
+
+mlx5_core-y += lib/fw.o lib/uar.o lib/mcg.o lib/cq.o \
+	       lib/srq.o lib/alloc.o lib/qp.o lib/port.o lib/mr.o \
+	       lib/pd.o lib/mad.o lib/transobj.o lib/vport.o lib/rl.o
+
+mlx5_core-y += sriov/sriov.o
+mlx5_core-y += fs/cmd.o fs/core.o fs/counters.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/wq.o en/main.o en/common.o \
+		en/fs.o en/ethtool.o en/tx.o en/rx.o en/rx_am.o \
+		en/txrx.o en/clock.o en/vxlan.o en/tc.o en/arfs.o \
+		en/rep.o en/fs_ethtool.o en/selftest.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += sriov/eswitch.o sriov/eswitch_offloads.o
+
+mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en/dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/cmd.c b/drivers/net/ethernet/mellanox/mlx5/cmd.c
new file mode 100644
index 0000000..1922e98
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/cmd.c
@@ -0,0 +1,1804 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/io-mapping.h>
+#include "lib/driver.h"
+#include <linux/debugfs.h>
+#include "mlx5.h"
+
+enum {
+	CMD_IF_REV = 5,
+};
+
+enum {
+	CMD_MODE_POLLING,
+	CMD_MODE_EVENTS
+};
+
+enum {
+	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
+	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
+	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
+	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
+	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
+	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
+	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
+	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
+	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
+	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
+	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
+};
+
+static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
+					   struct mlx5_cmd_msg *in,
+					   struct mlx5_cmd_msg *out,
+					   void *uout, int uout_size,
+					   mlx5_cmd_cbk_t cbk,
+					   void *context, int page_queue)
+{
+	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
+	struct mlx5_cmd_work_ent *ent;
+
+	ent = kzalloc(sizeof(*ent), alloc_flags);
+	if (!ent)
+		return ERR_PTR(-ENOMEM);
+
+	ent->in		= in;
+	ent->out	= out;
+	ent->uout	= uout;
+	ent->uout_size	= uout_size;
+	ent->callback	= cbk;
+	ent->context	= context;
+	ent->cmd	= cmd;
+	ent->page_queue = page_queue;
+
+	return ent;
+}
+
+static u8 alloc_token(struct mlx5_cmd *cmd)
+{
+	u8 token;
+
+	spin_lock(&cmd->token_lock);
+	cmd->token++;
+	if (cmd->token == 0)
+		cmd->token++;
+	token = cmd->token;
+	spin_unlock(&cmd->token_lock);
+
+	return token;
+}
+
+static int alloc_ent(struct mlx5_cmd *cmd)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&cmd->alloc_lock, flags);
+	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
+	if (ret < cmd->max_reg_cmds)
+		clear_bit(ret, &cmd->bitmask);
+	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+
+	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
+}
+
+static void free_ent(struct mlx5_cmd *cmd, int idx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cmd->alloc_lock, flags);
+	set_bit(idx, &cmd->bitmask);
+	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+}
+
+static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
+{
+	return cmd->cmd_buf + (idx << cmd->log_stride);
+}
+
+static u8 xor8_buf(void *buf, size_t offset, int len)
+{
+	u8 *ptr = buf;
+	u8 sum = 0;
+	int i;
+	int end = len + offset;
+
+	for (i = offset; i < end; i++)
+		sum ^= ptr[i];
+
+	return sum;
+}
+
+static int verify_block_sig(struct mlx5_cmd_prot_block *block)
+{
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
+		return -EINVAL;
+
+	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
+{
+	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
+}
+
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
+{
+	struct mlx5_cmd_mailbox *next = msg->next;
+	int size = msg->len;
+	int blen = size - min_t(int, sizeof(msg->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
+
+	for (i = 0; i < n && next; i++)  {
+		calc_block_sig(next->buf);
+		next = next->next;
+	}
+}
+
+static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
+{
+	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
+	if (csum) {
+		calc_chain_sig(ent->in);
+		calc_chain_sig(ent->out);
+	}
+}
+
+static void poll_timeout(struct mlx5_cmd_work_ent *ent)
+{
+	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+	u8 own;
+
+	do {
+		own = ent->lay->status_own;
+		if (!(own & CMD_OWNER_HW)) {
+			ent->ret = 0;
+			return;
+		}
+		usleep_range(5000, 10000);
+	} while (time_before(jiffies, poll_end));
+
+	ent->ret = -ETIMEDOUT;
+}
+
+static void free_cmd(struct mlx5_cmd_work_ent *ent)
+{
+	kfree(ent);
+}
+
+
+static int verify_signature(struct mlx5_cmd_work_ent *ent)
+{
+	struct mlx5_cmd_mailbox *next = ent->out->next;
+	int err;
+	u8 sig;
+	int size = ent->out->len;
+	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+		/ MLX5_CMD_DATA_BLOCK_SIZE;
+	int i = 0;
+
+	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+	if (sig != 0xff)
+		return -EINVAL;
+
+	for (i = 0; i < n && next; i++) {
+		err = verify_block_sig(next->buf);
+		if (err)
+			return err;
+
+		next = next->next;
+	}
+
+	return 0;
+}
+
+static void dump_buf(void *buf, int size, int data_only, int offset)
+{
+	__be32 *p = buf;
+	int i;
+
+	for (i = 0; i < size; i += 16) {
+		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
+			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
+			 be32_to_cpu(p[3]));
+		p += 4;
+		offset += 16;
+	}
+	if (!data_only)
+		pr_debug("\n");
+}
+
+static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
+				       u32 *synd, u8 *status)
+{
+	*synd = 0;
+	*status = 0;
+
+	switch (op) {
+	case MLX5_CMD_OP_TEARDOWN_HCA:
+	case MLX5_CMD_OP_DISABLE_HCA:
+	case MLX5_CMD_OP_MANAGE_PAGES:
+	case MLX5_CMD_OP_DESTROY_MKEY:
+	case MLX5_CMD_OP_DESTROY_EQ:
+	case MLX5_CMD_OP_DESTROY_CQ:
+	case MLX5_CMD_OP_DESTROY_QP:
+	case MLX5_CMD_OP_DESTROY_PSV:
+	case MLX5_CMD_OP_DESTROY_SRQ:
+	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+	case MLX5_CMD_OP_DESTROY_DCT:
+	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
+	case MLX5_CMD_OP_DEALLOC_PD:
+	case MLX5_CMD_OP_DEALLOC_UAR:
+	case MLX5_CMD_OP_DETACH_FROM_MCG:
+	case MLX5_CMD_OP_DEALLOC_XRCD:
+	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
+	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
+	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_DESTROY_LAG:
+	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
+	case MLX5_CMD_OP_DESTROY_TIR:
+	case MLX5_CMD_OP_DESTROY_SQ:
+	case MLX5_CMD_OP_DESTROY_RQ:
+	case MLX5_CMD_OP_DESTROY_RMP:
+	case MLX5_CMD_OP_DESTROY_TIS:
+	case MLX5_CMD_OP_DESTROY_RQT:
+	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
+	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
+	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+	case MLX5_CMD_OP_2ERR_QP:
+	case MLX5_CMD_OP_2RST_QP:
+	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
+	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
+		return MLX5_CMD_STAT_OK;
+
+	case MLX5_CMD_OP_QUERY_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_ADAPTER:
+	case MLX5_CMD_OP_INIT_HCA:
+	case MLX5_CMD_OP_ENABLE_HCA:
+	case MLX5_CMD_OP_QUERY_PAGES:
+	case MLX5_CMD_OP_SET_HCA_CAP:
+	case MLX5_CMD_OP_QUERY_ISSI:
+	case MLX5_CMD_OP_SET_ISSI:
+	case MLX5_CMD_OP_CREATE_MKEY:
+	case MLX5_CMD_OP_QUERY_MKEY:
+	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
+	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
+	case MLX5_CMD_OP_CREATE_EQ:
+	case MLX5_CMD_OP_QUERY_EQ:
+	case MLX5_CMD_OP_GEN_EQE:
+	case MLX5_CMD_OP_CREATE_CQ:
+	case MLX5_CMD_OP_QUERY_CQ:
+	case MLX5_CMD_OP_MODIFY_CQ:
+	case MLX5_CMD_OP_CREATE_QP:
+	case MLX5_CMD_OP_RST2INIT_QP:
+	case MLX5_CMD_OP_INIT2RTR_QP:
+	case MLX5_CMD_OP_RTR2RTS_QP:
+	case MLX5_CMD_OP_RTS2RTS_QP:
+	case MLX5_CMD_OP_SQERR2RTS_QP:
+	case MLX5_CMD_OP_QUERY_QP:
+	case MLX5_CMD_OP_SQD_RTS_QP:
+	case MLX5_CMD_OP_INIT2INIT_QP:
+	case MLX5_CMD_OP_CREATE_PSV:
+	case MLX5_CMD_OP_CREATE_SRQ:
+	case MLX5_CMD_OP_QUERY_SRQ:
+	case MLX5_CMD_OP_ARM_RQ:
+	case MLX5_CMD_OP_CREATE_XRC_SRQ:
+	case MLX5_CMD_OP_QUERY_XRC_SRQ:
+	case MLX5_CMD_OP_ARM_XRC_SRQ:
+	case MLX5_CMD_OP_CREATE_DCT:
+	case MLX5_CMD_OP_DRAIN_DCT:
+	case MLX5_CMD_OP_QUERY_DCT:
+	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
+	case MLX5_CMD_OP_QUERY_VPORT_STATE:
+	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
+	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
+	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
+	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
+	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
+	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
+	case MLX5_CMD_OP_QUERY_Q_COUNTER:
+	case MLX5_CMD_OP_ALLOC_PD:
+	case MLX5_CMD_OP_ALLOC_UAR:
+	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
+	case MLX5_CMD_OP_ACCESS_REG:
+	case MLX5_CMD_OP_ATTACH_TO_MCG:
+	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
+	case MLX5_CMD_OP_MAD_IFC:
+	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
+	case MLX5_CMD_OP_SET_MAD_DEMUX:
+	case MLX5_CMD_OP_NOP:
+	case MLX5_CMD_OP_ALLOC_XRCD:
+	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
+	case MLX5_CMD_OP_QUERY_CONG_STATUS:
+	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
+	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
+	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
+	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
+	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
+	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
+	case MLX5_CMD_OP_CREATE_LAG:
+	case MLX5_CMD_OP_MODIFY_LAG:
+	case MLX5_CMD_OP_QUERY_LAG:
+	case MLX5_CMD_OP_CREATE_VPORT_LAG:
+	case MLX5_CMD_OP_CREATE_TIR:
+	case MLX5_CMD_OP_MODIFY_TIR:
+	case MLX5_CMD_OP_QUERY_TIR:
+	case MLX5_CMD_OP_CREATE_SQ:
+	case MLX5_CMD_OP_MODIFY_SQ:
+	case MLX5_CMD_OP_QUERY_SQ:
+	case MLX5_CMD_OP_CREATE_RQ:
+	case MLX5_CMD_OP_MODIFY_RQ:
+	case MLX5_CMD_OP_QUERY_RQ:
+	case MLX5_CMD_OP_CREATE_RMP:
+	case MLX5_CMD_OP_MODIFY_RMP:
+	case MLX5_CMD_OP_QUERY_RMP:
+	case MLX5_CMD_OP_CREATE_TIS:
+	case MLX5_CMD_OP_MODIFY_TIS:
+	case MLX5_CMD_OP_QUERY_TIS:
+	case MLX5_CMD_OP_CREATE_RQT:
+	case MLX5_CMD_OP_MODIFY_RQT:
+	case MLX5_CMD_OP_QUERY_RQT:
+
+	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
+	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
+	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
+	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
+	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
+	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
+	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
+		*status = MLX5_DRIVER_STATUS_ABORTED;
+		*synd = MLX5_DRIVER_SYND;
+		return -EIO;
+	default:
+		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
+		return -EINVAL;
+	}
+}
+
+const char *mlx5_command_str(int command)
+{
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
+
+	switch (command) {
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+	MLX5_COMMAND_STR_CASE(INIT_HCA);
+	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+	MLX5_COMMAND_STR_CASE(SET_ISSI);
+	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+	MLX5_COMMAND_STR_CASE(CREATE_EQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+	MLX5_COMMAND_STR_CASE(QUERY_EQ);
+	MLX5_COMMAND_STR_CASE(GEN_EQE);
+	MLX5_COMMAND_STR_CASE(CREATE_CQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+	MLX5_COMMAND_STR_CASE(QUERY_CQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+	MLX5_COMMAND_STR_CASE(CREATE_QP);
+	MLX5_COMMAND_STR_CASE(DESTROY_QP);
+	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+	MLX5_COMMAND_STR_CASE(2ERR_QP);
+	MLX5_COMMAND_STR_CASE(2RST_QP);
+	MLX5_COMMAND_STR_CASE(QUERY_QP);
+	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+	MLX5_COMMAND_STR_CASE(CREATE_PSV);
+	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+	MLX5_COMMAND_STR_CASE(ARM_RQ);
+	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+	MLX5_COMMAND_STR_CASE(CREATE_DCT);
+	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+	MLX5_COMMAND_STR_CASE(QUERY_DCT);
+	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+	MLX5_COMMAND_STR_CASE(ALLOC_PD);
+	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+	MLX5_COMMAND_STR_CASE(ACCESS_REG);
+	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
+	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+	MLX5_COMMAND_STR_CASE(MAD_IFC);
+	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+	MLX5_COMMAND_STR_CASE(NOP);
+	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+	MLX5_COMMAND_STR_CASE(CREATE_LAG);
+	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
+	MLX5_COMMAND_STR_CASE(QUERY_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
+	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
+	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
+	MLX5_COMMAND_STR_CASE(CREATE_TIR);
+	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+	MLX5_COMMAND_STR_CASE(QUERY_TIR);
+	MLX5_COMMAND_STR_CASE(CREATE_SQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+	MLX5_COMMAND_STR_CASE(QUERY_SQ);
+	MLX5_COMMAND_STR_CASE(CREATE_RQ);
+	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+	MLX5_COMMAND_STR_CASE(QUERY_RQ);
+	MLX5_COMMAND_STR_CASE(CREATE_RMP);
+	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+	MLX5_COMMAND_STR_CASE(QUERY_RMP);
+	MLX5_COMMAND_STR_CASE(CREATE_TIS);
+	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+	MLX5_COMMAND_STR_CASE(QUERY_TIS);
+	MLX5_COMMAND_STR_CASE(CREATE_RQT);
+	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+	MLX5_COMMAND_STR_CASE(QUERY_RQT);
+	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
+	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
+	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
+	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
+	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
+	default: return "unknown command opcode";
+	}
+}
+
+static const char *cmd_status_str(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_STAT_OK:
+		return "OK";
+	case MLX5_CMD_STAT_INT_ERR:
+		return "internal error";
+	case MLX5_CMD_STAT_BAD_OP_ERR:
+		return "bad operation";
+	case MLX5_CMD_STAT_BAD_PARAM_ERR:
+		return "bad parameter";
+	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
+		return "bad system state";
+	case MLX5_CMD_STAT_BAD_RES_ERR:
+		return "bad resource";
+	case MLX5_CMD_STAT_RES_BUSY:
+		return "resource busy";
+	case MLX5_CMD_STAT_LIM_ERR:
+		return "limits exceeded";
+	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
+		return "bad resource state";
+	case MLX5_CMD_STAT_IX_ERR:
+		return "bad index";
+	case MLX5_CMD_STAT_NO_RES_ERR:
+		return "no resources";
+	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
+		return "bad input length";
+	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
+		return "bad output length";
+	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
+		return "bad QP state";
+	case MLX5_CMD_STAT_BAD_PKT_ERR:
+		return "bad packet (discarded)";
+	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
+		return "bad size too many outstanding CQEs";
+	default:
+		return "unknown status";
+	}
+}
+
+static int cmd_status_to_err(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_STAT_OK:				return 0;
+	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
+	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
+	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
+	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
+	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
+	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
+	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
+	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
+	default:					return -EIO;
+	}
+}
+
+struct mlx5_ifc_mbox_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
+{
+	*status = MLX5_GET(mbox_out, out, status);
+	*syndrome = MLX5_GET(mbox_out, out, syndrome);
+}
+
+static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
+{
+	u32 syndrome;
+	u8  status;
+	u16 opcode;
+	u16 op_mod;
+
+	mlx5_cmd_mbox_status(out, &status, &syndrome);
+	if (!status)
+		return 0;
+
+	opcode = MLX5_GET(mbox_in, in, opcode);
+	op_mod = MLX5_GET(mbox_in, in, op_mod);
+
+	mlx5_core_err(dev,
+		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
+		      mlx5_command_str(opcode),
+		      opcode, op_mod,
+		      cmd_status_str(status),
+		      status,
+		      syndrome);
+
+	return cmd_status_to_err(status);
+}
+
+static void dump_command(struct mlx5_core_dev *dev,
+			 struct mlx5_cmd_work_ent *ent, int input)
+{
+	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
+	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
+	struct mlx5_cmd_mailbox *next = msg->next;
+	int data_only;
+	u32 offset = 0;
+	int dump_len;
+
+	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
+
+	if (data_only)
+		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
+				   "dump command data %s(0x%x) %s\n",
+				   mlx5_command_str(op), op,
+				   input ? "INPUT" : "OUTPUT");
+	else
+		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
+			      mlx5_command_str(op), op,
+			      input ? "INPUT" : "OUTPUT");
+
+	if (data_only) {
+		if (input) {
+			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
+			offset += sizeof(ent->lay->in);
+		} else {
+			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
+			offset += sizeof(ent->lay->out);
+		}
+	} else {
+		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
+		offset += sizeof(*ent->lay);
+	}
+
+	while (next && offset < msg->len) {
+		if (data_only) {
+			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
+			dump_buf(next->buf, dump_len, 1, offset);
+			offset += MLX5_CMD_DATA_BLOCK_SIZE;
+		} else {
+			mlx5_core_dbg(dev, "command block:\n");
+			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
+			offset += sizeof(struct mlx5_cmd_prot_block);
+		}
+		next = next->next;
+	}
+
+	if (data_only)
+		pr_debug("\n");
+}
+
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+	return MLX5_GET(mbox_in, in->first.data, opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+						  work);
+	struct mlx5_cmd_work_ent *ent = container_of(dwork,
+						     struct mlx5_cmd_work_ent,
+						     cb_timeout_work);
+	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+						 cmd);
+
+	ent->ret = -ETIMEDOUT;
+	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+		       mlx5_command_str(msg_to_opcode(ent->in)),
+		       msg_to_opcode(ent->in));
+	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
+static void cmd_work_handler(struct work_struct *work)
+{
+	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
+	struct mlx5_cmd *cmd = ent->cmd;
+	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+	struct mlx5_cmd_layout *lay;
+	struct semaphore *sem;
+	unsigned long flags;
+
+	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
+	down(sem);
+	if (!ent->page_queue) {
+		ent->idx = alloc_ent(cmd);
+		if (ent->idx < 0) {
+			mlx5_core_err(dev, "failed to allocate command entry\n");
+			up(sem);
+			return;
+		}
+	} else {
+		ent->idx = cmd->max_reg_cmds;
+		spin_lock_irqsave(&cmd->alloc_lock, flags);
+		clear_bit(ent->idx, &cmd->bitmask);
+		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
+	}
+
+	cmd->ent_arr[ent->idx] = ent;
+	lay = get_inst(cmd, ent->idx);
+	ent->lay = lay;
+	memset(lay, 0, sizeof(*lay));
+	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
+	ent->op = be32_to_cpu(lay->in[0]) >> 16;
+	if (ent->in->next)
+		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
+	lay->inlen = cpu_to_be32(ent->in->len);
+	if (ent->out->next)
+		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
+	lay->outlen = cpu_to_be32(ent->out->len);
+	lay->type = MLX5_PCI_CMD_XPORT;
+	lay->token = ent->token;
+	lay->status_own = CMD_OWNER_HW;
+	set_signature(ent, !cmd->checksum_disabled);
+	dump_command(dev, ent, 1);
+	ent->ts1 = ktime_get_ns();
+
+	if (ent->callback)
+		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
+	/* ring doorbell after the descriptor is valid */
+	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
+	wmb();
+	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
+	mmiowb();
+	/* if not in polling don't use ent after this point */
+	if (cmd->mode == CMD_MODE_POLLING) {
+		poll_timeout(ent);
+		/* make sure we read the descriptor after ownership is SW */
+		rmb();
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+	}
+}
+
+static const char *deliv_status_to_str(u8 status)
+{
+	switch (status) {
+	case MLX5_CMD_DELIVERY_STAT_OK:
+		return "no errors";
+	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
+		return "signature error";
+	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
+		return "token error";
+	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
+		return "bad block number";
+	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
+		return "output pointer not aligned to block size";
+	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
+		return "input pointer not aligned to block size";
+	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
+		return "firmware internal error";
+	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
+		return "command input length error";
+	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
+		return "command output length error";
+	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
+		return "reserved fields not cleared";
+	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
+		return "bad command descriptor type";
+	default:
+		return "unknown status code";
+	}
+}
+
+static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
+{
+	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+	struct mlx5_cmd *cmd = &dev->cmd;
+	int err;
+
+	if (cmd->mode == CMD_MODE_POLLING) {
+		wait_for_completion(&ent->done);
+	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+		ent->ret = -ETIMEDOUT;
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+	}
+
+	err = ent->ret;
+
+	if (err == -ETIMEDOUT) {
+		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+			       mlx5_command_str(msg_to_opcode(ent->in)),
+			       msg_to_opcode(ent->in));
+	}
+	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+		      err, deliv_status_to_str(ent->status), ent->status);
+
+	return err;
+}
+
+/*  Notes:
+ *    1. Callback functions may not sleep
+ *    2. page queue commands do not support asynchronous completion
+ */
+static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
+			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
+			   mlx5_cmd_cbk_t callback,
+			   void *context, int page_queue, u8 *status,
+			   u8 token)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+	struct mlx5_cmd_work_ent *ent;
+	struct mlx5_cmd_stats *stats;
+	int err = 0;
+	s64 ds;
+	u16 op;
+
+	if (callback && page_queue)
+		return -EINVAL;
+
+	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
+			page_queue);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	ent->token = token;
+
+	if (!callback)
+		init_completion(&ent->done);
+
+	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
+	INIT_WORK(&ent->work, cmd_work_handler);
+	if (page_queue) {
+		cmd_work_handler(&ent->work);
+	} else if (!queue_work(cmd->wq, &ent->work)) {
+		mlx5_core_warn(dev, "failed to queue work\n");
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	if (callback)
+		goto out;
+
+	err = wait_func(dev, ent);
+	if (err == -ETIMEDOUT)
+		goto out_free;
+
+	ds = ent->ts2 - ent->ts1;
+	op = MLX5_GET(mbox_in, in->first.data, opcode);
+	if (op < ARRAY_SIZE(cmd->stats)) {
+		stats = &cmd->stats[op];
+		spin_lock_irq(&stats->lock);
+		stats->sum += ds;
+		++stats->n;
+		spin_unlock_irq(&stats->lock);
+	}
+	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+			   "fw exec time for %s is %lld nsec\n",
+			   mlx5_command_str(op), ds);
+	*status = ent->status;
+
+out_free:
+	free_cmd(ent);
+out:
+	return err;
+}
+
+static ssize_t dbg_write(struct file *filp, const char __user *buf,
+			 size_t count, loff_t *pos)
+{
+	struct mlx5_core_dev *dev = filp->private_data;
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	char lbuf[3];
+	int err;
+
+	if (!dbg->in_msg || !dbg->out_msg)
+		return -ENOMEM;
+
+	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
+		return -EFAULT;
+
+	lbuf[sizeof(lbuf) - 1] = 0;
+
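+	/* only the literal string "go" triggers execution of the staged
+	 * command buffers
+	 */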
+	if (strcmp(lbuf, "go"))
+		return -EINVAL;
+
+	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
+
+	return err ? err : count;
+}
+
+static const struct file_operations fops = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.write	= dbg_write,
+};
+
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+			    u8 token)
+{
+	struct mlx5_cmd_prot_block *block;
+	struct mlx5_cmd_mailbox *next;
+	int copy;
+
+	if (!to || !from)
+		return -ENOMEM;
+
+	copy = min_t(int, size, sizeof(to->first.data));
+	memcpy(to->first.data, from, copy);
+	size -= copy;
+	from += copy;
+
+	next = to->next;
+	while (size) {
+		if (!next) {
+			/* this is a BUG */
+			return -ENOMEM;
+		}
+
+		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
+		block = next->buf;
+		memcpy(block->data, from, copy);
+		from += copy;
+		size -= copy;
+		block->token = token;
+		next = next->next;
+	}
+
+	return 0;
+}
+
+static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
+{
+	struct mlx5_cmd_prot_block *block;
+	struct mlx5_cmd_mailbox *next;
+	int copy;
+
+	if (!to || !from)
+		return -ENOMEM;
+
+	copy = min_t(int, size, sizeof(from->first.data));
+	memcpy(to, from->first.data, copy);
+	size -= copy;
+	to += copy;
+
+	next = from->next;
+	while (size) {
+		if (!next) {
+			/* this is a BUG */
+			return -ENOMEM;
+		}
+
+		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
+		block = next->buf;
+
+		memcpy(to, block->data, copy);
+		to += copy;
+		size -= copy;
+		next = next->next;
+	}
+
+	return 0;
+}
+
+static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
+					      gfp_t flags)
+{
+	struct mlx5_cmd_mailbox *mailbox;
+
+	mailbox = kmalloc(sizeof(*mailbox), flags);
+	if (!mailbox)
+		return ERR_PTR(-ENOMEM);
+
+	mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
+				       &mailbox->dma);
+	if (!mailbox->buf) {
+		mlx5_core_dbg(dev, "failed allocation\n");
+		kfree(mailbox);
+		return ERR_PTR(-ENOMEM);
+	}
+	mailbox->next = NULL;
+
+	return mailbox;
+}
+
+static void free_cmd_box(struct mlx5_core_dev *dev,
+			 struct mlx5_cmd_mailbox *mailbox)
+{
+	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
+	kfree(mailbox);
+}
+
+static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
+					       gfp_t flags, int size,
+					       u8 token)
+{
+	struct mlx5_cmd_mailbox *tmp, *head = NULL;
+	struct mlx5_cmd_prot_block *block;
+	struct mlx5_cmd_msg *msg;
+	int blen;
+	int err;
+	int n;
+	int i;
+
+	msg = kzalloc(sizeof(*msg), flags);
+	if (!msg)
+		return ERR_PTR(-ENOMEM);
+
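+	/* bytes that do not fit in the 16-byte inline area are carried in a
+	 * chain of MLX5_CMD_DATA_BLOCK_SIZE mailboxes, built below in reverse
+	 * so each block can point at the DMA address of its successor
+	 */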
+	blen = size - min_t(int, sizeof(msg->first.data), size);
+	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;
+
+	for (i = 0; i < n; i++) {
+		tmp = alloc_cmd_box(dev, flags);
+		if (IS_ERR(tmp)) {
+			mlx5_core_warn(dev, "failed allocating block\n");
+			err = PTR_ERR(tmp);
+			goto err_alloc;
+		}
+
+		block = tmp->buf;
+		tmp->next = head;
+		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
+		block->block_num = cpu_to_be32(n - i - 1);
+		block->token = token;
+		head = tmp;
+	}
+	msg->next = head;
+	msg->len = size;
+	return msg;
+
+err_alloc:
+	while (head) {
+		tmp = head->next;
+		free_cmd_box(dev, head);
+		head = tmp;
+	}
+	kfree(msg);
+
+	return ERR_PTR(err);
+}
+
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+			      struct mlx5_cmd_msg *msg)
+{
+	struct mlx5_cmd_mailbox *head = msg->next;
+	struct mlx5_cmd_mailbox *next;
+
+	while (head) {
+		next = head->next;
+		free_cmd_box(dev, head);
+		head = next;
+	}
+	kfree(msg);
+}
+
+static ssize_t data_write(struct file *filp, const char __user *buf,
+			  size_t count, loff_t *pos)
+{
+	struct mlx5_core_dev *dev = filp->private_data;
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	void *ptr;
+
+	if (*pos != 0)
+		return -EINVAL;
+
+	kfree(dbg->in_msg);
+	dbg->in_msg = NULL;
+	dbg->inlen = 0;
+	ptr = memdup_user(buf, count);
+	if (IS_ERR(ptr))
+		return PTR_ERR(ptr);
+	dbg->in_msg = ptr;
+	dbg->inlen = count;
+
+	*pos = count;
+
+	return count;
+}
+
+static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
+			 loff_t *pos)
+{
+	struct mlx5_core_dev *dev = filp->private_data;
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	int copy;
+
+	if (*pos)
+		return 0;
+
+	if (!dbg->out_msg)
+		return -ENOMEM;
+
+	copy = min_t(int, count, dbg->outlen);
+	if (copy_to_user(buf, dbg->out_msg, copy))
+		return -EFAULT;
+
+	*pos += copy;
+
+	return copy;
+}
+
+static const struct file_operations dfops = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.write	= data_write,
+	.read	= data_read,
+};
+
+static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
+			   loff_t *pos)
+{
+	struct mlx5_core_dev *dev = filp->private_data;
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	char outlen[8];
+	int err;
+
+	if (*pos)
+		return 0;
+
+	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
+	if (err < 0)
+		return err;
+
+	if (copy_to_user(buf, &outlen, err))
+		return -EFAULT;
+
+	*pos += err;
+
+	return err;
+}
+
+static ssize_t outlen_write(struct file *filp, const char __user *buf,
+			    size_t count, loff_t *pos)
+{
+	struct mlx5_core_dev *dev = filp->private_data;
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	char outlen_str[8];
+	int outlen;
+	void *ptr;
+	int err;
+
+	if (*pos != 0 || count > 6)
+		return -EINVAL;
+
+	kfree(dbg->out_msg);
+	dbg->out_msg = NULL;
+	dbg->outlen = 0;
+
+	if (copy_from_user(outlen_str, buf, count))
+		return -EFAULT;
+
+	outlen_str[7] = 0;
+
+	err = sscanf(outlen_str, "%d", &outlen);
+	if (err < 0)
+		return err;
+
+	ptr = kzalloc(outlen, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	dbg->out_msg = ptr;
+	dbg->outlen = outlen;
+
+	*pos = count;
+
+	return count;
+}
+
+static const struct file_operations olfops = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.write	= outlen_write,
+	.read	= outlen_read,
+};
+
+static void set_wqname(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+
+	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
+		 dev_name(&dev->pdev->dev));
+}
+
+static void clean_debug_files(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+
+	if (!mlx5_debugfs_root)
+		return;
+
+	mlx5_cmdif_debugfs_cleanup(dev);
+	debugfs_remove_recursive(dbg->dbg_root);
+}
+
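+/* debugfs command interface under <dbg_root>/cmd: "in", "out", "out_len",
+ * "status" and "run" expose raw command execution for debugging
+ */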
+static int create_debugfs_files(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
+	int err = -ENOMEM;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
+	if (!dbg->dbg_root)
+		return err;
+
+	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
+					  dev, &dfops);
+	if (!dbg->dbg_in)
+		goto err_dbg;
+
+	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
+					   dev, &dfops);
+	if (!dbg->dbg_out)
+		goto err_dbg;
+
+	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
+					      dev, &olfops);
+	if (!dbg->dbg_outlen)
+		goto err_dbg;
+
+	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
+					    &dbg->status);
+	if (!dbg->dbg_status)
+		goto err_dbg;
+
+	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
+	if (!dbg->dbg_run)
+		goto err_dbg;
+
+	mlx5_cmdif_debugfs_init(dev);
+
+	return 0;
+
+err_dbg:
+	clean_debug_files(dev);
+	return err;
+}
+
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+	int i;
+
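+	/* take every command slot to drain in-flight commands before
+	 * switching the completion mode
+	 */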
+	for (i = 0; i < cmd->max_reg_cmds; i++)
+		down(&cmd->sem);
+	down(&cmd->pages_sem);
+
+	cmd->mode = mode;
+
+	up(&cmd->pages_sem);
+	for (i = 0; i < cmd->max_reg_cmds; i++)
+		up(&cmd->sem);
+}
+
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+{
+	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
+
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
+}
+
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
+{
+	unsigned long flags;
+
+	if (msg->parent) {
+		spin_lock_irqsave(&msg->parent->lock, flags);
+		list_add_tail(&msg->list, &msg->parent->head);
+		spin_unlock_irqrestore(&msg->parent->lock, flags);
+	} else {
+		mlx5_free_cmd_msg(dev, msg);
+	}
+}
+
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+	struct mlx5_cmd_work_ent *ent;
+	mlx5_cmd_cbk_t callback;
+	void *context;
+	int err;
+	int i;
+	s64 ds;
+	struct mlx5_cmd_stats *stats;
+	unsigned long flags;
+	unsigned long vector;
+
+	/* there can be at most 32 outstanding commands */
+	vector = vec & 0xffffffff;
+	for (i = 0; i < (1 << cmd->log_sz); i++) {
+		if (test_bit(i, &vector)) {
+			struct semaphore *sem;
+
+			ent = cmd->ent_arr[i];
+			if (ent->callback)
+				cancel_delayed_work(&ent->cb_timeout_work);
+			if (ent->page_queue)
+				sem = &cmd->pages_sem;
+			else
+				sem = &cmd->sem;
+			ent->ts2 = ktime_get_ns();
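+			/* copy the inline part of the output from the command
+			 * layout before the status is parsed below
+			 */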
+			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
+			dump_command(dev, ent, 0);
+			if (!ent->ret) {
+				if (!cmd->checksum_disabled)
+					ent->ret = verify_signature(ent);
+				else
+					ent->ret = 0;
+				if (vec & MLX5_TRIGGERED_CMD_COMP)
+					ent->status = MLX5_DRIVER_STATUS_ABORTED;
+				else
+					ent->status = ent->lay->status_own >> 1;
+
+				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
+					      ent->ret, deliv_status_to_str(ent->status), ent->status);
+			}
+			free_ent(cmd, ent->idx);
+
+			if (ent->callback) {
+				ds = ent->ts2 - ent->ts1;
+				if (ent->op < ARRAY_SIZE(cmd->stats)) {
+					stats = &cmd->stats[ent->op];
+					spin_lock_irqsave(&stats->lock, flags);
+					stats->sum += ds;
+					++stats->n;
+					spin_unlock_irqrestore(&stats->lock, flags);
+				}
+
+				callback = ent->callback;
+				context = ent->context;
+				err = ent->ret;
+				if (!err) {
+					err = mlx5_copy_from_msg(ent->uout,
+								 ent->out,
+								 ent->uout_size);
+
+					err = err ? err : mlx5_cmd_check(dev,
+									ent->in->first.data,
+									ent->uout);
+				}
+
+				mlx5_free_cmd_msg(dev, ent->out);
+				free_msg(dev, ent->in);
+
+				err = err ? err : ent->status;
+				free_cmd(ent);
+				callback(err, context);
+			} else {
+				complete(&ent->done);
+			}
+			up(sem);
+		}
+	}
+}
+EXPORT_SYMBOL(mlx5_cmd_comp_handler);
+
+static int status_to_err(u8 status)
+{
+	return status ? -1 : 0; /* TBD more meaningful codes */
+}
+
+static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+				      gfp_t gfp)
+{
+	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
+	struct cmd_msg_cache *ch = NULL;
+	struct mlx5_cmd *cmd = &dev->cmd;
+	int i;
+
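+	/* an inbox of up to 16 bytes fits entirely in the inline area, so
+	 * the mailbox caches are not needed for it
+	 */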
+	if (in_size <= 16)
+		goto cache_miss;
+
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &cmd->cache[i];
+		if (in_size > ch->max_inbox_size)
+			continue;
+		spin_lock_irq(&ch->lock);
+		if (list_empty(&ch->head)) {
+			spin_unlock_irq(&ch->lock);
+			continue;
+		}
+		msg = list_entry(ch->head.next, typeof(*msg), list);
+		/* cached entries are allocated at max_inbox_size, so the
+		 * real message length must be recorded explicitly
+		 */
+		msg->len = in_size;
+		list_del(&msg->list);
+		spin_unlock_irq(&ch->lock);
+		break;
+	}
+
+	if (!IS_ERR(msg))
+		return msg;
+
+cache_miss:
+	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
+	return msg;
+}
+
+static int is_manage_pages(void *in)
+{
+	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+}
+
+static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+		    int out_size, mlx5_cmd_cbk_t callback, void *context)
+{
+	struct mlx5_cmd_msg *inb;
+	struct mlx5_cmd_msg *outb;
+	int pages_queue;
+	gfp_t gfp;
+	int err;
+	u8 status = 0;
+	u32 drv_synd;
+	u8 token;
+
+	if (pci_channel_offline(dev->pdev) ||
+	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		u16 opcode = MLX5_GET(mbox_in, in, opcode);
+
+		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
+		MLX5_SET(mbox_out, out, status, status);
+		MLX5_SET(mbox_out, out, syndrome, drv_synd);
+		return err;
+	}
+
+	pages_queue = is_manage_pages(in);
+	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
+
+	inb = alloc_msg(dev, in_size, gfp);
+	if (IS_ERR(inb)) {
+		err = PTR_ERR(inb);
+		return err;
+	}
+
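+	/* a rolling token is stamped into the command layout and every
+	 * mailbox block so descriptor and mailboxes can be cross-checked
+	 */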
+	token = alloc_token(&dev->cmd);
+
+	err = mlx5_copy_to_msg(inb, in, in_size, token);
+	if (err) {
+		mlx5_core_warn(dev, "err %d\n", err);
+		goto out_in;
+	}
+
+	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
+	if (IS_ERR(outb)) {
+		err = PTR_ERR(outb);
+		goto out_in;
+	}
+
+	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
+			      pages_queue, &status, token);
+	if (err)
+		goto out_out;
+
+	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
+	if (status) {
+		err = status_to_err(status);
+		goto out_out;
+	}
+
+	if (!callback)
+		err = mlx5_copy_from_msg(out, outb, out_size);
+
+out_out:
+	if (!callback)
+		mlx5_free_cmd_msg(dev, outb);
+
+out_in:
+	if (!callback)
+		free_msg(dev, inb);
+	return err;
+}
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+		  int out_size)
+{
+	int err;
+
+	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
+	return err ? : mlx5_cmd_check(dev, in, out);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec);
+
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+		     void *out, int out_size, mlx5_cmd_cbk_t callback,
+		     void *context)
+{
+	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
+}
+EXPORT_SYMBOL(mlx5_cmd_exec_cb);
+
+static void destroy_msg_cache(struct mlx5_core_dev *dev)
+{
+	struct cmd_msg_cache *ch;
+	struct mlx5_cmd_msg *msg;
+	struct mlx5_cmd_msg *n;
+	int i;
+
+	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
+		ch = &dev->cmd.cache[i];
+		list_for_each_entry_safe(msg, n, &ch->head, list) {
+			list_del(&msg->list);
+			mlx5_free_cmd_msg(dev, msg);
+		}
+	}
+}
+
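+/* per-size message caches: many entries for small inboxes, few for large */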
+static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
+	512, 32, 16, 8, 2
+};
+
+static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
+	16 + MLX5_CMD_DATA_BLOCK_SIZE,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
+	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
+};
+
+static void create_msg_cache(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+	struct cmd_msg_cache *ch;
+	struct mlx5_cmd_msg *msg;
+	int i;
+	int k;
+
+	/* Initialize and fill the caches with initial entries */
+	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
+		ch = &cmd->cache[k];
+		spin_lock_init(&ch->lock);
+		INIT_LIST_HEAD(&ch->head);
+		ch->num_ent = cmd_cache_num_ent[k];
+		ch->max_inbox_size = cmd_cache_ent_size[k];
+		for (i = 0; i < ch->num_ent; i++) {
+			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
+						 ch->max_inbox_size, 0);
+			if (IS_ERR(msg))
+				break;
+			msg->parent = ch;
+			list_add_tail(&msg->list, &ch->head);
+		}
+	}
+}
+
+static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+	struct device *ddev = &dev->pdev->dev;
+
+	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+						 &cmd->alloc_dma, GFP_KERNEL);
+	if (!cmd->cmd_alloc_buf)
+		return -ENOMEM;
+
+	/* make sure it is aligned to 4K */
+	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
+		cmd->cmd_buf = cmd->cmd_alloc_buf;
+		cmd->dma = cmd->alloc_dma;
+		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
+		return 0;
+	}
+
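+	/* the buffer came back unaligned; over-allocate so that an aligned
+	 * MLX5_ADAPTER_PAGE_SIZE window is guaranteed to fit inside it
+	 */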
+	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+			  cmd->alloc_dma);
+	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
+						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
+						 &cmd->alloc_dma, GFP_KERNEL);
+	if (!cmd->cmd_alloc_buf)
+		return -ENOMEM;
+
+	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
+	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
+	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
+	return 0;
+}
+
+static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
+{
+	struct device *ddev = &dev->pdev->dev;
+
+	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+			  cmd->alloc_dma);
+}
+
+int mlx5_cmd_init(struct mlx5_core_dev *dev)
+{
+	int size = sizeof(struct mlx5_cmd_prot_block);
+	int align = roundup_pow_of_two(size);
+	struct mlx5_cmd *cmd = &dev->cmd;
+	u32 cmd_h, cmd_l;
+	u16 cmd_if_rev;
+	int err;
+	int i;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd_if_rev = cmdif_rev(dev);
+	if (cmd_if_rev != CMD_IF_REV) {
+		dev_err(&dev->pdev->dev,
+			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
+			CMD_IF_REV, cmd_if_rev);
+		return -EINVAL;
+	}
+
+	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
+	if (!cmd->pool)
+		return -ENOMEM;
+
+	err = alloc_cmd_page(dev, cmd);
+	if (err)
+		goto err_free_pool;
+
+	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
+	cmd->log_sz = cmd_l >> 4 & 0xf;
+	cmd->log_stride = cmd_l & 0xf;
+	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
+		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
+			1 << cmd->log_sz);
+		err = -EINVAL;
+		goto err_free_page;
+	}
+
+	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
+		dev_err(&dev->pdev->dev, "command queue size overflow\n");
+		err = -EINVAL;
+		goto err_free_page;
+	}
+
+	cmd->checksum_disabled = 1;
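+	/* the highest command slot is reserved for the page queue; the
+	 * remaining slots serve regular commands through the bitmask
+	 */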
+	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
+	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
+
+	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+	if (cmd->cmdif_rev > CMD_IF_REV) {
+		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
+			CMD_IF_REV, cmd->cmdif_rev);
+		err = -ENOTSUPP;
+		goto err_free_page;
+	}
+
+	spin_lock_init(&cmd->alloc_lock);
+	spin_lock_init(&cmd->token_lock);
+	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
+		spin_lock_init(&cmd->stats[i].lock);
+
+	sema_init(&cmd->sem, cmd->max_reg_cmds);
+	sema_init(&cmd->pages_sem, 1);
+
+	cmd_h = (u32)((u64)(cmd->dma) >> 32);
+	cmd_l = (u32)(cmd->dma);
+	if (cmd_l & 0xfff) {
+		dev_err(&dev->pdev->dev, "invalid command queue address\n");
+		err = -ENOMEM;
+		goto err_free_page;
+	}
+
+	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
+	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
+
+	/* Make sure firmware sees the complete address before we proceed */
+	wmb();
+
+	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
+
+	cmd->mode = CMD_MODE_POLLING;
+
+	create_msg_cache(dev);
+
+	set_wqname(dev);
+	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
+	if (!cmd->wq) {
+		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
+		err = -ENOMEM;
+		goto err_cache;
+	}
+
+	err = create_debugfs_files(dev);
+	if (err) {
+		err = -ENOMEM;
+		goto err_wq;
+	}
+
+	return 0;
+
+err_wq:
+	destroy_workqueue(cmd->wq);
+
+err_cache:
+	destroy_msg_cache(dev);
+
+err_free_page:
+	free_cmd_page(dev, cmd);
+
+err_free_pool:
+	pci_pool_destroy(cmd->pool);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_cmd_init);
+
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd *cmd = &dev->cmd;
+
+	clean_debug_files(dev);
+	destroy_workqueue(cmd->wq);
+	destroy_msg_cache(dev);
+	free_cmd_page(dev, cmd);
+	pci_pool_destroy(cmd->pool);
+}
+EXPORT_SYMBOL(mlx5_cmd_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
deleted file mode 100644
index ddb4ca4..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Mellanox driver configuration
-#
-
-config MLX5_CORE
-	tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
-	depends on MAY_USE_DEVLINK
-	depends on PCI
-	default n
-	---help---
-	  Core driver for low level functionality of the ConnectX-4 and
-	  Connect-IB cards by Mellanox Technologies.
-
-config MLX5_CORE_EN
-	bool "Mellanox Technologies ConnectX-4 Ethernet support"
-	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
-	imply PTP_1588_CLOCK
-	default n
-	---help---
-	  Ethernet support in Mellanox Technologies ConnectX-4 NIC.
-
-config MLX5_CORE_EN_DCB
-	bool "Data Center Bridging (DCB) Support"
-	default y
-	depends on MLX5_CORE_EN && DCB
-	---help---
-	  Say Y here if you want to use Data Center Bridging (DCB) in the
-	  driver.
-	  If set to N, will not be able to configure QoS and ratelimit attributes.
-	  This flag is depended on the kernel's DCB support.
-
-	  If unsure, set to Y
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
deleted file mode 100644
index 9f43beb..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-obj-$(CONFIG_MLX5_CORE)		+= mlx5_core.o
-
-mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
-		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
-		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-		fs_counters.o rl.o lag.o dev.o
-
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
-		en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
-		en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
-		en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
-
-mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) +=  en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
deleted file mode 100644
index 66bd213..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ /dev/null
@@ -1,298 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/bitmap.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/mlx5/driver.h>
-
-#include "mlx5_core.h"
-
-struct mlx5_db_pgdir {
-	struct list_head	list;
-	unsigned long	       *bitmap;
-	__be32		       *db_page;
-	dma_addr_t		db_dma;
-};
-
-/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0.
- */
-
-static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
-					   size_t size, dma_addr_t *dma_handle,
-					   int node)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	int original_node;
-	void *cpu_handle;
-
-	mutex_lock(&priv->alloc_mutex);
-	original_node = dev_to_node(&dev->pdev->dev);
-	set_dev_node(&dev->pdev->dev, node);
-	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
-					 dma_handle, GFP_KERNEL);
-	set_dev_node(&dev->pdev->dev, original_node);
-	mutex_unlock(&priv->alloc_mutex);
-	return cpu_handle;
-}
-
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_buf *buf, int node)
-{
-	dma_addr_t t;
-
-	buf->size = size;
-	buf->npages       = 1;
-	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
-	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
-							  &t, node);
-	if (!buf->direct.buf)
-		return -ENOMEM;
-
-	buf->direct.map = t;
-
-	while (t & ((1 << buf->page_shift) - 1)) {
-		--buf->page_shift;
-		buf->npages *= 2;
-	}
-
-	return 0;
-}
-
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
-{
-	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
-
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
-{
-	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-			  buf->direct.map);
-}
-EXPORT_SYMBOL_GPL(mlx5_buf_free);
-
-int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			     struct mlx5_frag_buf *buf, int node)
-{
-	int i;
-
-	buf->size = size;
-	buf->npages = 1 << get_order(size);
-	buf->page_shift = PAGE_SHIFT;
-	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
-			     GFP_KERNEL);
-	if (!buf->frags)
-		goto err_out;
-
-	for (i = 0; i < buf->npages; i++) {
-		struct mlx5_buf_list *frag = &buf->frags[i];
-		int frag_sz = min_t(int, size, PAGE_SIZE);
-
-		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
-							  &frag->map, node);
-		if (!frag->buf)
-			goto err_free_buf;
-		if (frag->map & ((1 << buf->page_shift) - 1)) {
-			dma_free_coherent(&dev->pdev->dev, frag_sz,
-					  buf->frags[i].buf, buf->frags[i].map);
-			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
-				       &frag->map, buf->page_shift);
-			goto err_free_buf;
-		}
-		size -= frag_sz;
-	}
-
-	return 0;
-
-err_free_buf:
-	while (i--)
-		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
-				  buf->frags[i].map);
-	kfree(buf->frags);
-err_out:
-	return -ENOMEM;
-}
-
-void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
-{
-	int size = buf->size;
-	int i;
-
-	for (i = 0; i < buf->npages; i++) {
-		int frag_sz = min_t(int, size, PAGE_SIZE);
-
-		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
-				  buf->frags[i].map);
-		size -= frag_sz;
-	}
-	kfree(buf->frags);
-}
-
-static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
-						 int node)
-{
-	u32 db_per_page = PAGE_SIZE / cache_line_size();
-	struct mlx5_db_pgdir *pgdir;
-
-	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
-	if (!pgdir)
-		return NULL;
-
-	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
-				sizeof(unsigned long),
-				GFP_KERNEL);
-
-	if (!pgdir->bitmap) {
-		kfree(pgdir);
-		return NULL;
-	}
-
-	bitmap_fill(pgdir->bitmap, db_per_page);
-
-	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
-						       &pgdir->db_dma, node);
-	if (!pgdir->db_page) {
-		kfree(pgdir->bitmap);
-		kfree(pgdir);
-		return NULL;
-	}
-
-	return pgdir;
-}
-
-static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
-				    struct mlx5_db *db)
-{
-	u32 db_per_page = PAGE_SIZE / cache_line_size();
-	int offset;
-	int i;
-
-	i = find_first_bit(pgdir->bitmap, db_per_page);
-	if (i >= db_per_page)
-		return -ENOMEM;
-
-	__clear_bit(i, pgdir->bitmap);
-
-	db->u.pgdir = pgdir;
-	db->index   = i;
-	offset = db->index * cache_line_size();
-	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
-	db->dma     = pgdir->db_dma  + offset;
-
-	db->db[0] = 0;
-	db->db[1] = 0;
-
-	return 0;
-}
-
-int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
-{
-	struct mlx5_db_pgdir *pgdir;
-	int ret = 0;
-
-	mutex_lock(&dev->priv.pgdir_mutex);
-
-	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
-		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
-			goto out;
-
-	pgdir = mlx5_alloc_db_pgdir(dev, node);
-	if (!pgdir) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
-	list_add(&pgdir->list, &dev->priv.pgdir_list);
-
-	/* This should never fail -- we just allocated an empty page: */
-	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
-
-out:
-	mutex_unlock(&dev->priv.pgdir_mutex);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
-
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
-	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_alloc);
-
-void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
-{
-	u32 db_per_page = PAGE_SIZE / cache_line_size();
-	mutex_lock(&dev->priv.pgdir_mutex);
-
-	__set_bit(db->index, db->u.pgdir->bitmap);
-
-	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
-		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
-				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
-		list_del(&db->u.pgdir->list);
-		kfree(db->u.pgdir->bitmap);
-		kfree(db->u.pgdir);
-	}
-
-	mutex_unlock(&dev->priv.pgdir_mutex);
-}
-EXPORT_SYMBOL_GPL(mlx5_db_free);
-
-
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
-{
-	u64 addr;
-	int i;
-
-	for (i = 0; i < buf->npages; i++) {
-		addr = buf->direct.map + (i << buf->page_shift);
-
-		pas[i] = cpu_to_be64(addr);
-	}
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
-
-void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
-{
-	int i;
-
-	for (i = 0; i < buf->npages; i++)
-		pas[i] = cpu_to_be64(buf->frags[i].map);
-}
-EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
deleted file mode 100644
index 3797cc7..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ /dev/null
@@ -1,1805 +0,0 @@
-/*
- * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/random.h>
-#include <linux/io-mapping.h>
-#include <linux/mlx5/driver.h>
-#include <linux/debugfs.h>
-
-#include "mlx5_core.h"
-
-enum {
-	CMD_IF_REV = 5,
-};
-
-enum {
-	CMD_MODE_POLLING,
-	CMD_MODE_EVENTS
-};
-
-enum {
-	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
-	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
-	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
-	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
-	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
-	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
-	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
-	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
-	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
-	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
-	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
-};
-
-static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
-					   struct mlx5_cmd_msg *in,
-					   struct mlx5_cmd_msg *out,
-					   void *uout, int uout_size,
-					   mlx5_cmd_cbk_t cbk,
-					   void *context, int page_queue)
-{
-	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
-	struct mlx5_cmd_work_ent *ent;
-
-	ent = kzalloc(sizeof(*ent), alloc_flags);
-	if (!ent)
-		return ERR_PTR(-ENOMEM);
-
-	ent->in		= in;
-	ent->out	= out;
-	ent->uout	= uout;
-	ent->uout_size	= uout_size;
-	ent->callback	= cbk;
-	ent->context	= context;
-	ent->cmd	= cmd;
-	ent->page_queue = page_queue;
-
-	return ent;
-}
-
-static u8 alloc_token(struct mlx5_cmd *cmd)
-{
-	u8 token;
-
-	spin_lock(&cmd->token_lock);
-	cmd->token++;
-	if (cmd->token == 0)
-		cmd->token++;
-	token = cmd->token;
-	spin_unlock(&cmd->token_lock);
-
-	return token;
-}
-
-static int alloc_ent(struct mlx5_cmd *cmd)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&cmd->alloc_lock, flags);
-	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
-	if (ret < cmd->max_reg_cmds)
-		clear_bit(ret, &cmd->bitmask);
-	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
-
-	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
-}
-
-static void free_ent(struct mlx5_cmd *cmd, int idx)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&cmd->alloc_lock, flags);
-	set_bit(idx, &cmd->bitmask);
-	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
-}
-
-static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
-{
-	return cmd->cmd_buf + (idx << cmd->log_stride);
-}
-
-static u8 xor8_buf(void *buf, size_t offset, int len)
-{
-	u8 *ptr = buf;
-	u8 sum = 0;
-	int i;
-	int end = len + offset;
-
-	for (i = offset; i < end; i++)
-		sum ^= ptr[i];
-
-	return sum;
-}
-
-static int verify_block_sig(struct mlx5_cmd_prot_block *block)
-{
-	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
-	int xor_len = sizeof(*block) - sizeof(block->data) - 1;
-
-	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
-		return -EINVAL;
-
-	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
-		return -EINVAL;
-
-	return 0;
-}
-
-static void calc_block_sig(struct mlx5_cmd_prot_block *block)
-{
-	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
-	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
-
-	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
-	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
-}
-
-static void calc_chain_sig(struct mlx5_cmd_msg *msg)
-{
-	struct mlx5_cmd_mailbox *next = msg->next;
-	int size = msg->len;
-	int blen = size - min_t(int, sizeof(msg->first.data), size);
-	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
-		/ MLX5_CMD_DATA_BLOCK_SIZE;
-	int i = 0;
-
-	for (i = 0; i < n && next; i++)  {
-		calc_block_sig(next->buf);
-		next = next->next;
-	}
-}
-
-static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
-{
-	ent->lay->sig = ~xor8_buf(ent->lay, 0,  sizeof(*ent->lay));
-	if (csum) {
-		calc_chain_sig(ent->in);
-		calc_chain_sig(ent->out);
-	}
-}
-
-static void poll_timeout(struct mlx5_cmd_work_ent *ent)
-{
-	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
-	u8 own;
-
-	do {
-		own = ent->lay->status_own;
-		if (!(own & CMD_OWNER_HW)) {
-			ent->ret = 0;
-			return;
-		}
-		usleep_range(5000, 10000);
-	} while (time_before(jiffies, poll_end));
-
-	ent->ret = -ETIMEDOUT;
-}
-
-static void free_cmd(struct mlx5_cmd_work_ent *ent)
-{
-	kfree(ent);
-}
-
-
-static int verify_signature(struct mlx5_cmd_work_ent *ent)
-{
-	struct mlx5_cmd_mailbox *next = ent->out->next;
-	int err;
-	u8 sig;
-	int size = ent->out->len;
-	int blen = size - min_t(int, sizeof(ent->out->first.data), size);
-	int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
-		/ MLX5_CMD_DATA_BLOCK_SIZE;
-	int i = 0;
-
-	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
-	if (sig != 0xff)
-		return -EINVAL;
-
-	for (i = 0; i < n && next; i++) {
-		err = verify_block_sig(next->buf);
-		if (err)
-			return err;
-
-		next = next->next;
-	}
-
-	return 0;
-}
-
-static void dump_buf(void *buf, int size, int data_only, int offset)
-{
-	__be32 *p = buf;
-	int i;
-
-	for (i = 0; i < size; i += 16) {
-		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
-			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
-			 be32_to_cpu(p[3]));
-		p += 4;
-		offset += 16;
-	}
-	if (!data_only)
-		pr_debug("\n");
-}
-
-static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
-				       u32 *synd, u8 *status)
-{
-	*synd = 0;
-	*status = 0;
-
-	switch (op) {
-	case MLX5_CMD_OP_TEARDOWN_HCA:
-	case MLX5_CMD_OP_DISABLE_HCA:
-	case MLX5_CMD_OP_MANAGE_PAGES:
-	case MLX5_CMD_OP_DESTROY_MKEY:
-	case MLX5_CMD_OP_DESTROY_EQ:
-	case MLX5_CMD_OP_DESTROY_CQ:
-	case MLX5_CMD_OP_DESTROY_QP:
-	case MLX5_CMD_OP_DESTROY_PSV:
-	case MLX5_CMD_OP_DESTROY_SRQ:
-	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
-	case MLX5_CMD_OP_DESTROY_DCT:
-	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
-	case MLX5_CMD_OP_DEALLOC_PD:
-	case MLX5_CMD_OP_DEALLOC_UAR:
-	case MLX5_CMD_OP_DETACH_FROM_MCG:
-	case MLX5_CMD_OP_DEALLOC_XRCD:
-	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
-	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
-	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
-	case MLX5_CMD_OP_DESTROY_LAG:
-	case MLX5_CMD_OP_DESTROY_VPORT_LAG:
-	case MLX5_CMD_OP_DESTROY_TIR:
-	case MLX5_CMD_OP_DESTROY_SQ:
-	case MLX5_CMD_OP_DESTROY_RQ:
-	case MLX5_CMD_OP_DESTROY_RMP:
-	case MLX5_CMD_OP_DESTROY_TIS:
-	case MLX5_CMD_OP_DESTROY_RQT:
-	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
-	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
-	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
-	case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
-	case MLX5_CMD_OP_2ERR_QP:
-	case MLX5_CMD_OP_2RST_QP:
-	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
-	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
-	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
-	case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
-	case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER:
-	case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT:
-		return MLX5_CMD_STAT_OK;
-
-	case MLX5_CMD_OP_QUERY_HCA_CAP:
-	case MLX5_CMD_OP_QUERY_ADAPTER:
-	case MLX5_CMD_OP_INIT_HCA:
-	case MLX5_CMD_OP_ENABLE_HCA:
-	case MLX5_CMD_OP_QUERY_PAGES:
-	case MLX5_CMD_OP_SET_HCA_CAP:
-	case MLX5_CMD_OP_QUERY_ISSI:
-	case MLX5_CMD_OP_SET_ISSI:
-	case MLX5_CMD_OP_CREATE_MKEY:
-	case MLX5_CMD_OP_QUERY_MKEY:
-	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
-	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
-	case MLX5_CMD_OP_CREATE_EQ:
-	case MLX5_CMD_OP_QUERY_EQ:
-	case MLX5_CMD_OP_GEN_EQE:
-	case MLX5_CMD_OP_CREATE_CQ:
-	case MLX5_CMD_OP_QUERY_CQ:
-	case MLX5_CMD_OP_MODIFY_CQ:
-	case MLX5_CMD_OP_CREATE_QP:
-	case MLX5_CMD_OP_RST2INIT_QP:
-	case MLX5_CMD_OP_INIT2RTR_QP:
-	case MLX5_CMD_OP_RTR2RTS_QP:
-	case MLX5_CMD_OP_RTS2RTS_QP:
-	case MLX5_CMD_OP_SQERR2RTS_QP:
-	case MLX5_CMD_OP_QUERY_QP:
-	case MLX5_CMD_OP_SQD_RTS_QP:
-	case MLX5_CMD_OP_INIT2INIT_QP:
-	case MLX5_CMD_OP_CREATE_PSV:
-	case MLX5_CMD_OP_CREATE_SRQ:
-	case MLX5_CMD_OP_QUERY_SRQ:
-	case MLX5_CMD_OP_ARM_RQ:
-	case MLX5_CMD_OP_CREATE_XRC_SRQ:
-	case MLX5_CMD_OP_QUERY_XRC_SRQ:
-	case MLX5_CMD_OP_ARM_XRC_SRQ:
-	case MLX5_CMD_OP_CREATE_DCT:
-	case MLX5_CMD_OP_DRAIN_DCT:
-	case MLX5_CMD_OP_QUERY_DCT:
-	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
-	case MLX5_CMD_OP_QUERY_VPORT_STATE:
-	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
-	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
-	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
-	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
-	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
-	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
-	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
-	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
-	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
-	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
-	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
-	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
-	case MLX5_CMD_OP_QUERY_Q_COUNTER:
-	case MLX5_CMD_OP_ALLOC_PD:
-	case MLX5_CMD_OP_ALLOC_UAR:
-	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
-	case MLX5_CMD_OP_ACCESS_REG:
-	case MLX5_CMD_OP_ATTACH_TO_MCG:
-	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
-	case MLX5_CMD_OP_MAD_IFC:
-	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
-	case MLX5_CMD_OP_SET_MAD_DEMUX:
-	case MLX5_CMD_OP_NOP:
-	case MLX5_CMD_OP_ALLOC_XRCD:
-	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
-	case MLX5_CMD_OP_QUERY_CONG_STATUS:
-	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
-	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
-	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
-	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
-	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
-	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
-	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
-	case MLX5_CMD_OP_CREATE_LAG:
-	case MLX5_CMD_OP_MODIFY_LAG:
-	case MLX5_CMD_OP_QUERY_LAG:
-	case MLX5_CMD_OP_CREATE_VPORT_LAG:
-	case MLX5_CMD_OP_CREATE_TIR:
-	case MLX5_CMD_OP_MODIFY_TIR:
-	case MLX5_CMD_OP_QUERY_TIR:
-	case MLX5_CMD_OP_CREATE_SQ:
-	case MLX5_CMD_OP_MODIFY_SQ:
-	case MLX5_CMD_OP_QUERY_SQ:
-	case MLX5_CMD_OP_CREATE_RQ:
-	case MLX5_CMD_OP_MODIFY_RQ:
-	case MLX5_CMD_OP_QUERY_RQ:
-	case MLX5_CMD_OP_CREATE_RMP:
-	case MLX5_CMD_OP_MODIFY_RMP:
-	case MLX5_CMD_OP_QUERY_RMP:
-	case MLX5_CMD_OP_CREATE_TIS:
-	case MLX5_CMD_OP_MODIFY_TIS:
-	case MLX5_CMD_OP_QUERY_TIS:
-	case MLX5_CMD_OP_CREATE_RQT:
-	case MLX5_CMD_OP_MODIFY_RQT:
-	case MLX5_CMD_OP_QUERY_RQT:
-
-	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
-	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
-	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
-	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
-	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
-	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
-	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
-	case MLX5_CMD_OP_ALLOC_ENCAP_HEADER:
-	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
-	case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT:
-		*status = MLX5_DRIVER_STATUS_ABORTED;
-		*synd = MLX5_DRIVER_SYND;
-		return -EIO;
-	default:
-		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
-		return -EINVAL;
-	}
-}
-
-const char *mlx5_command_str(int command)
-{
-#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
-
-	switch (command) {
-	MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
-	MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
-	MLX5_COMMAND_STR_CASE(INIT_HCA);
-	MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
-	MLX5_COMMAND_STR_CASE(ENABLE_HCA);
-	MLX5_COMMAND_STR_CASE(DISABLE_HCA);
-	MLX5_COMMAND_STR_CASE(QUERY_PAGES);
-	MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
-	MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
-	MLX5_COMMAND_STR_CASE(QUERY_ISSI);
-	MLX5_COMMAND_STR_CASE(SET_ISSI);
-	MLX5_COMMAND_STR_CASE(CREATE_MKEY);
-	MLX5_COMMAND_STR_CASE(QUERY_MKEY);
-	MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
-	MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
-	MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
-	MLX5_COMMAND_STR_CASE(CREATE_EQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_EQ);
-	MLX5_COMMAND_STR_CASE(QUERY_EQ);
-	MLX5_COMMAND_STR_CASE(GEN_EQE);
-	MLX5_COMMAND_STR_CASE(CREATE_CQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_CQ);
-	MLX5_COMMAND_STR_CASE(QUERY_CQ);
-	MLX5_COMMAND_STR_CASE(MODIFY_CQ);
-	MLX5_COMMAND_STR_CASE(CREATE_QP);
-	MLX5_COMMAND_STR_CASE(DESTROY_QP);
-	MLX5_COMMAND_STR_CASE(RST2INIT_QP);
-	MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
-	MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
-	MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
-	MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
-	MLX5_COMMAND_STR_CASE(2ERR_QP);
-	MLX5_COMMAND_STR_CASE(2RST_QP);
-	MLX5_COMMAND_STR_CASE(QUERY_QP);
-	MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
-	MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
-	MLX5_COMMAND_STR_CASE(CREATE_PSV);
-	MLX5_COMMAND_STR_CASE(DESTROY_PSV);
-	MLX5_COMMAND_STR_CASE(CREATE_SRQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
-	MLX5_COMMAND_STR_CASE(QUERY_SRQ);
-	MLX5_COMMAND_STR_CASE(ARM_RQ);
-	MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
-	MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
-	MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
-	MLX5_COMMAND_STR_CASE(CREATE_DCT);
-	MLX5_COMMAND_STR_CASE(DESTROY_DCT);
-	MLX5_COMMAND_STR_CASE(DRAIN_DCT);
-	MLX5_COMMAND_STR_CASE(QUERY_DCT);
-	MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
-	MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
-	MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
-	MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
-	MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
-	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
-	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
-	MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
-	MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
-	MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
-	MLX5_COMMAND_STR_CASE(ALLOC_PD);
-	MLX5_COMMAND_STR_CASE(DEALLOC_PD);
-	MLX5_COMMAND_STR_CASE(ALLOC_UAR);
-	MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
-	MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
-	MLX5_COMMAND_STR_CASE(ACCESS_REG);
-	MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
-	MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG);
-	MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
-	MLX5_COMMAND_STR_CASE(MAD_IFC);
-	MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
-	MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
-	MLX5_COMMAND_STR_CASE(NOP);
-	MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
-	MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
-	MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
-	MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
-	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
-	MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
-	MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
-	MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
-	MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
-	MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
-	MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
-	MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
-	MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
-	MLX5_COMMAND_STR_CASE(CREATE_LAG);
-	MLX5_COMMAND_STR_CASE(MODIFY_LAG);
-	MLX5_COMMAND_STR_CASE(QUERY_LAG);
-	MLX5_COMMAND_STR_CASE(DESTROY_LAG);
-	MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG);
-	MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG);
-	MLX5_COMMAND_STR_CASE(CREATE_TIR);
-	MLX5_COMMAND_STR_CASE(MODIFY_TIR);
-	MLX5_COMMAND_STR_CASE(DESTROY_TIR);
-	MLX5_COMMAND_STR_CASE(QUERY_TIR);
-	MLX5_COMMAND_STR_CASE(CREATE_SQ);
-	MLX5_COMMAND_STR_CASE(MODIFY_SQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_SQ);
-	MLX5_COMMAND_STR_CASE(QUERY_SQ);
-	MLX5_COMMAND_STR_CASE(CREATE_RQ);
-	MLX5_COMMAND_STR_CASE(MODIFY_RQ);
-	MLX5_COMMAND_STR_CASE(DESTROY_RQ);
-	MLX5_COMMAND_STR_CASE(QUERY_RQ);
-	MLX5_COMMAND_STR_CASE(CREATE_RMP);
-	MLX5_COMMAND_STR_CASE(MODIFY_RMP);
-	MLX5_COMMAND_STR_CASE(DESTROY_RMP);
-	MLX5_COMMAND_STR_CASE(QUERY_RMP);
-	MLX5_COMMAND_STR_CASE(CREATE_TIS);
-	MLX5_COMMAND_STR_CASE(MODIFY_TIS);
-	MLX5_COMMAND_STR_CASE(DESTROY_TIS);
-	MLX5_COMMAND_STR_CASE(QUERY_TIS);
-	MLX5_COMMAND_STR_CASE(CREATE_RQT);
-	MLX5_COMMAND_STR_CASE(MODIFY_RQT);
-	MLX5_COMMAND_STR_CASE(DESTROY_RQT);
-	MLX5_COMMAND_STR_CASE(QUERY_RQT);
-	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
-	MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
-	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
-	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
-	MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
-	MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
-	MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
-	MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
-	MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
-	MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
-	MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
-	MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
-	MLX5_COMMAND_STR_CASE(ALLOC_ENCAP_HEADER);
-	MLX5_COMMAND_STR_CASE(DEALLOC_ENCAP_HEADER);
-	MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT);
-	MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT);
-	MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT);
-	default: return "unknown command opcode";
-	}
-}
-
-static const char *cmd_status_str(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_STAT_OK:
-		return "OK";
-	case MLX5_CMD_STAT_INT_ERR:
-		return "internal error";
-	case MLX5_CMD_STAT_BAD_OP_ERR:
-		return "bad operation";
-	case MLX5_CMD_STAT_BAD_PARAM_ERR:
-		return "bad parameter";
-	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
-		return "bad system state";
-	case MLX5_CMD_STAT_BAD_RES_ERR:
-		return "bad resource";
-	case MLX5_CMD_STAT_RES_BUSY:
-		return "resource busy";
-	case MLX5_CMD_STAT_LIM_ERR:
-		return "limits exceeded";
-	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
-		return "bad resource state";
-	case MLX5_CMD_STAT_IX_ERR:
-		return "bad index";
-	case MLX5_CMD_STAT_NO_RES_ERR:
-		return "no resources";
-	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
-		return "bad input length";
-	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
-		return "bad output length";
-	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
-		return "bad QP state";
-	case MLX5_CMD_STAT_BAD_PKT_ERR:
-		return "bad packet (discarded)";
-	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
-		return "bad size too many outstanding CQEs";
-	default:
-		return "unknown status";
-	}
-}
-
-static int cmd_status_to_err(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_STAT_OK:				return 0;
-	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
-	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
-	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
-	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
-	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
-	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
-	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
-	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
-	default:					return -EIO;
-	}
-}
-
-struct mlx5_ifc_mbox_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_mbox_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
-{
-	*status = MLX5_GET(mbox_out, out, status);
-	*syndrome = MLX5_GET(mbox_out, out, syndrome);
-}
-
-static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
-{
-	u32 syndrome;
-	u8  status;
-	u16 opcode;
-	u16 op_mod;
-
-	mlx5_cmd_mbox_status(out, &status, &syndrome);
-	if (!status)
-		return 0;
-
-	opcode = MLX5_GET(mbox_in, in, opcode);
-	op_mod = MLX5_GET(mbox_in, in, op_mod);
-
-	mlx5_core_err(dev,
-		      "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
-		      mlx5_command_str(opcode),
-		      opcode, op_mod,
-		      cmd_status_str(status),
-		      status,
-		      syndrome);
-
-	return cmd_status_to_err(status);
-}
-
-static void dump_command(struct mlx5_core_dev *dev,
-			 struct mlx5_cmd_work_ent *ent, int input)
-{
-	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
-	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
-	struct mlx5_cmd_mailbox *next = msg->next;
-	int data_only;
-	u32 offset = 0;
-	int dump_len;
-
-	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
-
-	if (data_only)
-		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
-				   "dump command data %s(0x%x) %s\n",
-				   mlx5_command_str(op), op,
-				   input ? "INPUT" : "OUTPUT");
-	else
-		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
-			      mlx5_command_str(op), op,
-			      input ? "INPUT" : "OUTPUT");
-
-	if (data_only) {
-		if (input) {
-			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
-			offset += sizeof(ent->lay->in);
-		} else {
-			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
-			offset += sizeof(ent->lay->out);
-		}
-	} else {
-		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
-		offset += sizeof(*ent->lay);
-	}
-
-	while (next && offset < msg->len) {
-		if (data_only) {
-			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
-			dump_buf(next->buf, dump_len, 1, offset);
-			offset += MLX5_CMD_DATA_BLOCK_SIZE;
-		} else {
-			mlx5_core_dbg(dev, "command block:\n");
-			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
-			offset += sizeof(struct mlx5_cmd_prot_block);
-		}
-		next = next->next;
-	}
-
-	if (data_only)
-		pr_debug("\n");
-}
-
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
-	return MLX5_GET(mbox_in, in->first.data, opcode);
-}
-
-static void cb_timeout_handler(struct work_struct *work)
-{
-	struct delayed_work *dwork = container_of(work, struct delayed_work,
-						  work);
-	struct mlx5_cmd_work_ent *ent = container_of(dwork,
-						     struct mlx5_cmd_work_ent,
-						     cb_timeout_work);
-	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
-						 cmd);
-
-	ent->ret = -ETIMEDOUT;
-	mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
-		       mlx5_command_str(msg_to_opcode(ent->in)),
-		       msg_to_opcode(ent->in));
-	mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
-}
-
-static void cmd_work_handler(struct work_struct *work)
-{
-	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
-	struct mlx5_cmd *cmd = ent->cmd;
-	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
-	unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
-	struct mlx5_cmd_layout *lay;
-	struct semaphore *sem;
-	unsigned long flags;
-
-	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
-	down(sem);
-	if (!ent->page_queue) {
-		ent->idx = alloc_ent(cmd);
-		if (ent->idx < 0) {
-			mlx5_core_err(dev, "failed to allocate command entry\n");
-			up(sem);
-			return;
-		}
-	} else {
-		ent->idx = cmd->max_reg_cmds;
-		spin_lock_irqsave(&cmd->alloc_lock, flags);
-		clear_bit(ent->idx, &cmd->bitmask);
-		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
-	}
-
-	cmd->ent_arr[ent->idx] = ent;
-	lay = get_inst(cmd, ent->idx);
-	ent->lay = lay;
-	memset(lay, 0, sizeof(*lay));
-	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
-	ent->op = be32_to_cpu(lay->in[0]) >> 16;
-	if (ent->in->next)
-		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
-	lay->inlen = cpu_to_be32(ent->in->len);
-	if (ent->out->next)
-		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
-	lay->outlen = cpu_to_be32(ent->out->len);
-	lay->type = MLX5_PCI_CMD_XPORT;
-	lay->token = ent->token;
-	lay->status_own = CMD_OWNER_HW;
-	set_signature(ent, !cmd->checksum_disabled);
-	dump_command(dev, ent, 1);
-	ent->ts1 = ktime_get_ns();
-
-	if (ent->callback)
-		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
-
-	/* ring doorbell after the descriptor is valid */
-	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
-	wmb();
-	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
-	mmiowb();
-	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING) {
-		poll_timeout(ent);
-		/* make sure we read the descriptor after ownership is SW */
-		rmb();
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
-	}
-}
-
-static const char *deliv_status_to_str(u8 status)
-{
-	switch (status) {
-	case MLX5_CMD_DELIVERY_STAT_OK:
-		return "no errors";
-	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
-		return "signature error";
-	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
-		return "token error";
-	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
-		return "bad block number";
-	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
-		return "output pointer not aligned to block size";
-	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
-		return "input pointer not aligned to block size";
-	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
-		return "firmware internal error";
-	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
-		return "command input length error";
-	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
-		return "command output length error";
-	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
-		return "reserved fields not cleared";
-	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
-		return "bad command descriptor type";
-	default:
-		return "unknown status code";
-	}
-}
-
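-/* Wait for a command to complete. In polling mode the poller signals the
- * completion itself, so wait unconditionally; in events mode apply a
- * timeout and report -ETIMEDOUT if the EQ never delivers the completion.
- */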
-static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
-{
-	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
-	struct mlx5_cmd *cmd = &dev->cmd;
-	int err;
-
-	if (cmd->mode == CMD_MODE_POLLING) {
-		wait_for_completion(&ent->done);
-	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
-		ent->ret = -ETIMEDOUT;
-		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
-	}
-
-	err = ent->ret;
-
-	if (err == -ETIMEDOUT) {
-		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
-			       mlx5_command_str(msg_to_opcode(ent->in)),
-			       msg_to_opcode(ent->in));
-	}
-	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
-		      err, deliv_status_to_str(ent->status), ent->status);
-
-	return err;
-}
-
-/*  Notes:
- *    1. Callback functions may not sleep
- *    2. page queue commands do not support asynchronous completion
- */
-static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
-			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
-			   mlx5_cmd_cbk_t callback,
-			   void *context, int page_queue, u8 *status,
-			   u8 token)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-	struct mlx5_cmd_work_ent *ent;
-	struct mlx5_cmd_stats *stats;
-	int err = 0;
-	s64 ds;
-	u16 op;
-
-	if (callback && page_queue)
-		return -EINVAL;
-
-	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
-			page_queue);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-
-	ent->token = token;
-
-	if (!callback)
-		init_completion(&ent->done);
-
-	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
-	INIT_WORK(&ent->work, cmd_work_handler);
-	if (page_queue) {
-		cmd_work_handler(&ent->work);
-	} else if (!queue_work(cmd->wq, &ent->work)) {
-		mlx5_core_warn(dev, "failed to queue work\n");
-		err = -ENOMEM;
-		goto out_free;
-	}
-
-	if (callback)
-		goto out;
-
-	err = wait_func(dev, ent);
-	if (err == -ETIMEDOUT)
-		goto out_free;
-
-	ds = ent->ts2 - ent->ts1;
-	op = MLX5_GET(mbox_in, in->first.data, opcode);
-	if (op < ARRAY_SIZE(cmd->stats)) {
-		stats = &cmd->stats[op];
-		spin_lock_irq(&stats->lock);
-		stats->sum += ds;
-		++stats->n;
-		spin_unlock_irq(&stats->lock);
-	}
-	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
-			   "fw exec time for %s is %lld nsec\n",
-			   mlx5_command_str(op), ds);
-	*status = ent->status;
-
-out_free:
-	free_cmd(ent);
-out:
-	return err;
-}
-
-static ssize_t dbg_write(struct file *filp, const char __user *buf,
-			 size_t count, loff_t *pos)
-{
-	struct mlx5_core_dev *dev = filp->private_data;
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char lbuf[3];
-	int err;
-
-	if (!dbg->in_msg || !dbg->out_msg)
-		return -ENOMEM;
-
-	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
-		return -EFAULT;
-
-	lbuf[sizeof(lbuf) - 1] = 0;
-
-	if (strcmp(lbuf, "go"))
-		return -EINVAL;
-
-	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);
-
-	return err ? err : count;
-}
-
-static const struct file_operations fops = {
-	.owner	= THIS_MODULE,
-	.open	= simple_open,
-	.write	= dbg_write,
-};
-
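-/* Scatter a linear input buffer into a command message: the first 16
- * bytes go into the inline part of the descriptor, the remainder is
- * split across the chained mailbox data blocks.
- */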
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
-			    u8 token)
-{
-	struct mlx5_cmd_prot_block *block;
-	struct mlx5_cmd_mailbox *next;
-	int copy;
-
-	if (!to || !from)
-		return -ENOMEM;
-
-	copy = min_t(int, size, sizeof(to->first.data));
-	memcpy(to->first.data, from, copy);
-	size -= copy;
-	from += copy;
-
-	next = to->next;
-	while (size) {
-		if (!next) {
-			/* this is a BUG */
-			return -ENOMEM;
-		}
-
-		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
-		block = next->buf;
-		memcpy(block->data, from, copy);
-		from += copy;
-		size -= copy;
-		block->token = token;
-		next = next->next;
-	}
-
-	return 0;
-}
-
-static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
-{
-	struct mlx5_cmd_prot_block *block;
-	struct mlx5_cmd_mailbox *next;
-	int copy;
-
-	if (!to || !from)
-		return -ENOMEM;
-
-	copy = min_t(int, size, sizeof(from->first.data));
-	memcpy(to, from->first.data, copy);
-	size -= copy;
-	to += copy;
-
-	next = from->next;
-	while (size) {
-		if (!next) {
-			/* this is a BUG */
-			return -ENOMEM;
-		}
-
-		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
-		block = next->buf;
-
-		memcpy(to, block->data, copy);
-		to += copy;
-		size -= copy;
-		next = next->next;
-	}
-
-	return 0;
-}
-
-static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
-					      gfp_t flags)
-{
-	struct mlx5_cmd_mailbox *mailbox;
-
-	mailbox = kmalloc(sizeof(*mailbox), flags);
-	if (!mailbox)
-		return ERR_PTR(-ENOMEM);
-
-	mailbox->buf = pci_pool_zalloc(dev->cmd.pool, flags,
-				       &mailbox->dma);
-	if (!mailbox->buf) {
-		mlx5_core_dbg(dev, "failed allocation\n");
-		kfree(mailbox);
-		return ERR_PTR(-ENOMEM);
-	}
-	mailbox->next = NULL;
-
-	return mailbox;
-}
-
-static void free_cmd_box(struct mlx5_core_dev *dev,
-			 struct mlx5_cmd_mailbox *mailbox)
-{
-	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
-	kfree(mailbox);
-}
-
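-/* Allocate a command message large enough for @size bytes: 16 bytes live
- * inline in the descriptor, the rest in a chain of DMA-coherent mailboxes.
- * The chain is built in reverse so the list head ends up at block 0.
- */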
-static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
-					       gfp_t flags, int size,
-					       u8 token)
-{
-	struct mlx5_cmd_mailbox *tmp, *head = NULL;
-	struct mlx5_cmd_prot_block *block;
-	struct mlx5_cmd_msg *msg;
-	int blen;
-	int err;
-	int n;
-	int i;
-
-	msg = kzalloc(sizeof(*msg), flags);
-	if (!msg)
-		return ERR_PTR(-ENOMEM);
-
-	blen = size - min_t(int, sizeof(msg->first.data), size);
-	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;
-
-	for (i = 0; i < n; i++) {
-		tmp = alloc_cmd_box(dev, flags);
-		if (IS_ERR(tmp)) {
-			mlx5_core_warn(dev, "failed allocating block\n");
-			err = PTR_ERR(tmp);
-			goto err_alloc;
-		}
-
-		block = tmp->buf;
-		tmp->next = head;
-		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
-		block->block_num = cpu_to_be32(n - i - 1);
-		block->token = token;
-		head = tmp;
-	}
-	msg->next = head;
-	msg->len = size;
-	return msg;
-
-err_alloc:
-	while (head) {
-		tmp = head->next;
-		free_cmd_box(dev, head);
-		head = tmp;
-	}
-	kfree(msg);
-
-	return ERR_PTR(err);
-}
-
-static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
-				  struct mlx5_cmd_msg *msg)
-{
-	struct mlx5_cmd_mailbox *head = msg->next;
-	struct mlx5_cmd_mailbox *next;
-
-	while (head) {
-		next = head->next;
-		free_cmd_box(dev, head);
-		head = next;
-	}
-	kfree(msg);
-}
-
-static ssize_t data_write(struct file *filp, const char __user *buf,
-			  size_t count, loff_t *pos)
-{
-	struct mlx5_core_dev *dev = filp->private_data;
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	void *ptr;
-
-	if (*pos != 0)
-		return -EINVAL;
-
-	kfree(dbg->in_msg);
-	dbg->in_msg = NULL;
-	dbg->inlen = 0;
-	ptr = memdup_user(buf, count);
-	if (IS_ERR(ptr))
-		return PTR_ERR(ptr);
-	dbg->in_msg = ptr;
-	dbg->inlen = count;
-
-	*pos = count;
-
-	return count;
-}
-
-static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
-			 loff_t *pos)
-{
-	struct mlx5_core_dev *dev = filp->private_data;
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	int copy;
-
-	if (*pos)
-		return 0;
-
-	if (!dbg->out_msg)
-		return -ENOMEM;
-
-	copy = min_t(int, count, dbg->outlen);
-	if (copy_to_user(buf, dbg->out_msg, copy))
-		return -EFAULT;
-
-	*pos += copy;
-
-	return copy;
-}
-
-static const struct file_operations dfops = {
-	.owner	= THIS_MODULE,
-	.open	= simple_open,
-	.write	= data_write,
-	.read	= data_read,
-};
-
-static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
-			   loff_t *pos)
-{
-	struct mlx5_core_dev *dev = filp->private_data;
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen[8];
-	int err;
-
-	if (*pos)
-		return 0;
-
-	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
-	if (err < 0)
-		return err;
-
-	if (copy_to_user(buf, &outlen, err))
-		return -EFAULT;
-
-	*pos += err;
-
-	return err;
-}
-
-static ssize_t outlen_write(struct file *filp, const char __user *buf,
-			    size_t count, loff_t *pos)
-{
-	struct mlx5_core_dev *dev = filp->private_data;
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
-	int outlen;
-	void *ptr;
-	int err;
-
-	if (*pos != 0 || count > 6)
-		return -EINVAL;
-
-	kfree(dbg->out_msg);
-	dbg->out_msg = NULL;
-	dbg->outlen = 0;
-
-	if (copy_from_user(outlen_str, buf, count))
-		return -EFAULT;
-
-	outlen_str[7] = 0;
-
-	err = sscanf(outlen_str, "%d", &outlen);
-	if (err != 1)
-		return -EINVAL;
-
-	ptr = kzalloc(outlen, GFP_KERNEL);
-	if (!ptr)
-		return -ENOMEM;
-
-	dbg->out_msg = ptr;
-	dbg->outlen = outlen;
-
-	*pos = count;
-
-	return count;
-}
-
-static const struct file_operations olfops = {
-	.owner	= THIS_MODULE,
-	.open	= simple_open,
-	.write	= outlen_write,
-	.read	= outlen_read,
-};
-
-static void set_wqname(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-
-	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
-		 dev_name(&dev->pdev->dev));
-}
-
-static void clean_debug_files(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-
-	if (!mlx5_debugfs_root)
-		return;
-
-	mlx5_cmdif_debugfs_cleanup(dev);
-	debugfs_remove_recursive(dbg->dbg_root);
-}
-
-static int create_debugfs_files(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	int err = -ENOMEM;
-
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
-	if (!dbg->dbg_root)
-		return err;
-
-	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
-					  dev, &dfops);
-	if (!dbg->dbg_in)
-		goto err_dbg;
-
-	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
-					   dev, &dfops);
-	if (!dbg->dbg_out)
-		goto err_dbg;
-
-	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
-					      dev, &olfops);
-	if (!dbg->dbg_outlen)
-		goto err_dbg;
-
-	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
-					    &dbg->status);
-	if (!dbg->dbg_status)
-		goto err_dbg;
-
-	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
-	if (!dbg->dbg_run)
-		goto err_dbg;
-
-	mlx5_cmdif_debugfs_init(dev);
-
-	return 0;
-
-err_dbg:
-	clean_debug_files(dev);
-	return err;
-}
-
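-/* Switch between polling and events mode. Taking every regular slot plus
- * the page-queue slot guarantees no command is in flight while the mode
- * changes.
- */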
-static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-	int i;
-
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		down(&cmd->sem);
-	down(&cmd->pages_sem);
-
-	cmd->mode = mode;
-
-	up(&cmd->pages_sem);
-	for (i = 0; i < cmd->max_reg_cmds; i++)
-		up(&cmd->sem);
-}
-
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
-{
-	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
-}
-
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
-{
-	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
-}
-
-static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
-{
-	unsigned long flags;
-
-	if (msg->parent) {
-		spin_lock_irqsave(&msg->parent->lock, flags);
-		list_add_tail(&msg->list, &msg->parent->head);
-		spin_unlock_irqrestore(&msg->parent->lock, flags);
-	} else {
-		mlx5_free_cmd_msg(dev, msg);
-	}
-}
-
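-/* Completion path, driven by the command EQ in events mode or by the
- * poller in polling mode: copy back the inline output, verify the
- * signature when enabled, then either run the async callback (recording
- * latency stats) or wake the synchronous waiter.
- */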
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-	struct mlx5_cmd_work_ent *ent;
-	mlx5_cmd_cbk_t callback;
-	void *context;
-	int err;
-	int i;
-	s64 ds;
-	struct mlx5_cmd_stats *stats;
-	unsigned long flags;
-	unsigned long vector;
-
-	/* there can be at most 32 entries in the command queue */
-	vector = vec & 0xffffffff;
-	for (i = 0; i < (1 << cmd->log_sz); i++) {
-		if (test_bit(i, &vector)) {
-			struct semaphore *sem;
-
-			ent = cmd->ent_arr[i];
-			if (ent->callback)
-				cancel_delayed_work(&ent->cb_timeout_work);
-			if (ent->page_queue)
-				sem = &cmd->pages_sem;
-			else
-				sem = &cmd->sem;
-			ent->ts2 = ktime_get_ns();
-			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
-			dump_command(dev, ent, 0);
-			if (!ent->ret) {
-				if (!cmd->checksum_disabled)
-					ent->ret = verify_signature(ent);
-				else
-					ent->ret = 0;
-				if (vec & MLX5_TRIGGERED_CMD_COMP)
-					ent->status = MLX5_DRIVER_STATUS_ABORTED;
-				else
-					ent->status = ent->lay->status_own >> 1;
-
-				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
-					      ent->ret, deliv_status_to_str(ent->status), ent->status);
-			}
-			free_ent(cmd, ent->idx);
-
-			if (ent->callback) {
-				ds = ent->ts2 - ent->ts1;
-				if (ent->op < ARRAY_SIZE(cmd->stats)) {
-					stats = &cmd->stats[ent->op];
-					spin_lock_irqsave(&stats->lock, flags);
-					stats->sum += ds;
-					++stats->n;
-					spin_unlock_irqrestore(&stats->lock, flags);
-				}
-
-				callback = ent->callback;
-				context = ent->context;
-				err = ent->ret;
-				if (!err) {
-					err = mlx5_copy_from_msg(ent->uout,
-								 ent->out,
-								 ent->uout_size);
-
-					err = err ? err : mlx5_cmd_check(dev,
-									ent->in->first.data,
-									ent->uout);
-				}
-
-				mlx5_free_cmd_msg(dev, ent->out);
-				free_msg(dev, ent->in);
-
-				err = err ? err : ent->status;
-				free_cmd(ent);
-				callback(err, context);
-			} else {
-				complete(&ent->done);
-			}
-			up(sem);
-		}
-	}
-}
-EXPORT_SYMBOL(mlx5_cmd_comp_handler);
-
-static int status_to_err(u8 status)
-{
-	return status ? -1 : 0; /* TBD more meaningful codes */
-}
-
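-/* Grab an input message from the smallest cache whose inbox size fits;
- * tiny messages and cache misses fall back to a fresh allocation.
- */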
-static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
-				      gfp_t gfp)
-{
-	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
-	struct cmd_msg_cache *ch = NULL;
-	struct mlx5_cmd *cmd = &dev->cmd;
-	int i;
-
-	if (in_size <= 16)
-		goto cache_miss;
-
-	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
-		ch = &cmd->cache[i];
-		if (in_size > ch->max_inbox_size)
-			continue;
-		spin_lock_irq(&ch->lock);
-		if (list_empty(&ch->head)) {
-			spin_unlock_irq(&ch->lock);
-			continue;
-		}
-		msg = list_entry(ch->head.next, typeof(*msg), list);
-		/* For cached entries we must explicitly set the real
-		 * message size
-		 */
-		msg->len = in_size;
-		list_del(&msg->list);
-		spin_unlock_irq(&ch->lock);
-		break;
-	}
-
-	if (!IS_ERR(msg))
-		return msg;
-
-cache_miss:
-	msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
-	return msg;
-}
-
-static int is_manage_pages(void *in)
-{
-	return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
-}
-
-static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
-		    int out_size, mlx5_cmd_cbk_t callback, void *context)
-{
-	struct mlx5_cmd_msg *inb;
-	struct mlx5_cmd_msg *outb;
-	int pages_queue;
-	gfp_t gfp;
-	int err;
-	u8 status = 0;
-	u32 drv_synd;
-	u8 token;
-
-	if (pci_channel_offline(dev->pdev) ||
-	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-		u16 opcode = MLX5_GET(mbox_in, in, opcode);
-
-		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
-		MLX5_SET(mbox_out, out, status, status);
-		MLX5_SET(mbox_out, out, syndrome, drv_synd);
-		return err;
-	}
-
-	pages_queue = is_manage_pages(in);
-	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
-
-	inb = alloc_msg(dev, in_size, gfp);
-	if (IS_ERR(inb)) {
-		err = PTR_ERR(inb);
-		return err;
-	}
-
-	token = alloc_token(&dev->cmd);
-
-	err = mlx5_copy_to_msg(inb, in, in_size, token);
-	if (err) {
-		mlx5_core_warn(dev, "err %d\n", err);
-		goto out_in;
-	}
-
-	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
-	if (IS_ERR(outb)) {
-		err = PTR_ERR(outb);
-		goto out_in;
-	}
-
-	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
-			      pages_queue, &status, token);
-	if (err)
-		goto out_out;
-
-	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
-	if (status) {
-		err = status_to_err(status);
-		goto out_out;
-	}
-
-	if (!callback)
-		err = mlx5_copy_from_msg(out, outb, out_size);
-
-out_out:
-	if (!callback)
-		mlx5_free_cmd_msg(dev, outb);
-
-out_in:
-	if (!callback)
-		free_msg(dev, inb);
-	return err;
-}
-
-int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
-		  int out_size)
-{
-	int err;
-
-	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
-	return err ? : mlx5_cmd_check(dev, in, out);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec);
-
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-		     void *out, int out_size, mlx5_cmd_cbk_t callback,
-		     void *context)
-{
-	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
-}
-EXPORT_SYMBOL(mlx5_cmd_exec_cb);
-
-static void destroy_msg_cache(struct mlx5_core_dev *dev)
-{
-	struct cmd_msg_cache *ch;
-	struct mlx5_cmd_msg *msg;
-	struct mlx5_cmd_msg *n;
-	int i;
-
-	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
-		ch = &dev->cmd.cache[i];
-		list_for_each_entry_safe(msg, n, &ch->head, list) {
-			list_del(&msg->list);
-			mlx5_free_cmd_msg(dev, msg);
-		}
-	}
-}
-
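-/* Five pre-allocated message caches: many small inboxes down to a couple
- * of very large ones. Entry sizes are the 16 inline bytes plus a number
- * of data blocks.
- */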
-static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
-	512, 32, 16, 8, 2
-};
-
-static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
-	16 + MLX5_CMD_DATA_BLOCK_SIZE,
-	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
-	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
-	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
-	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
-};
-
-static void create_msg_cache(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-	struct cmd_msg_cache *ch;
-	struct mlx5_cmd_msg *msg;
-	int i;
-	int k;
-
-	/* Initialize and fill the caches with initial entries */
-	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
-		ch = &cmd->cache[k];
-		spin_lock_init(&ch->lock);
-		INIT_LIST_HEAD(&ch->head);
-		ch->num_ent = cmd_cache_num_ent[k];
-		ch->max_inbox_size = cmd_cache_ent_size[k];
-		for (i = 0; i < ch->num_ent; i++) {
-			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
-						 ch->max_inbox_size, 0);
-			if (IS_ERR(msg))
-				break;
-			msg->parent = ch;
-			list_add_tail(&msg->list, &ch->head);
-		}
-	}
-}
-
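-/* The command queue page must be 4K aligned. Try a plain page-sized
- * allocation first; if it comes back unaligned, reallocate with
- * 2 * page - 1 bytes so an aligned page is guaranteed to fit inside.
- */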
-static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
-{
-	struct device *ddev = &dev->pdev->dev;
-
-	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
-						 &cmd->alloc_dma, GFP_KERNEL);
-	if (!cmd->cmd_alloc_buf)
-		return -ENOMEM;
-
-	/* make sure it is aligned to 4K */
-	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
-		cmd->cmd_buf = cmd->cmd_alloc_buf;
-		cmd->dma = cmd->alloc_dma;
-		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
-		return 0;
-	}
-
-	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
-			  cmd->alloc_dma);
-	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
-						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
-						 &cmd->alloc_dma, GFP_KERNEL);
-	if (!cmd->cmd_alloc_buf)
-		return -ENOMEM;
-
-	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
-	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
-	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
-	return 0;
-}
-
-static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
-{
-	struct device *ddev = &dev->pdev->dev;
-
-	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
-			  cmd->alloc_dma);
-}
-
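-/* Bring up the command interface: read the queue geometry from the init
- * segment, program the queue address into HW, start in polling mode and
- * create the message caches, workqueue and debugfs files.
- */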
-int mlx5_cmd_init(struct mlx5_core_dev *dev)
-{
-	int size = sizeof(struct mlx5_cmd_prot_block);
-	int align = roundup_pow_of_two(size);
-	struct mlx5_cmd *cmd = &dev->cmd;
-	u32 cmd_h, cmd_l;
-	u16 cmd_if_rev;
-	int err;
-	int i;
-
-	memset(cmd, 0, sizeof(*cmd));
-	cmd_if_rev = cmdif_rev(dev);
-	if (cmd_if_rev != CMD_IF_REV) {
-		dev_err(&dev->pdev->dev,
-			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
-			CMD_IF_REV, cmd_if_rev);
-		return -EINVAL;
-	}
-
-	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
-	if (!cmd->pool)
-		return -ENOMEM;
-
-	err = alloc_cmd_page(dev, cmd);
-	if (err)
-		goto err_free_pool;
-
-	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
-	cmd->log_sz = cmd_l >> 4 & 0xf;
-	cmd->log_stride = cmd_l & 0xf;
-	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
-		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
-			1 << cmd->log_sz);
-		err = -EINVAL;
-		goto err_free_page;
-	}
-
-	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
-		dev_err(&dev->pdev->dev, "command queue size overflow\n");
-		err = -EINVAL;
-		goto err_free_page;
-	}
-
-	cmd->checksum_disabled = 1;
-	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
-	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;
-
-	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
-	if (cmd->cmdif_rev > CMD_IF_REV) {
-		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
-			CMD_IF_REV, cmd->cmdif_rev);
-		err = -ENOTSUPP;
-		goto err_free_page;
-	}
-
-	spin_lock_init(&cmd->alloc_lock);
-	spin_lock_init(&cmd->token_lock);
-	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
-		spin_lock_init(&cmd->stats[i].lock);
-
-	sema_init(&cmd->sem, cmd->max_reg_cmds);
-	sema_init(&cmd->pages_sem, 1);
-
-	cmd_h = (u32)((u64)(cmd->dma) >> 32);
-	cmd_l = (u32)(cmd->dma);
-	if (cmd_l & 0xfff) {
-		dev_err(&dev->pdev->dev, "invalid command queue address\n");
-		err = -ENOMEM;
-		goto err_free_page;
-	}
-
-	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
-	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
-
-	/* Make sure firmware sees the complete address before we proceed */
-	wmb();
-
-	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
-
-	cmd->mode = CMD_MODE_POLLING;
-
-	create_msg_cache(dev);
-
-	set_wqname(dev);
-	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
-	if (!cmd->wq) {
-		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
-		err = -ENOMEM;
-		goto err_cache;
-	}
-
-	err = create_debugfs_files(dev);
-	if (err) {
-		err = -ENOMEM;
-		goto err_wq;
-	}
-
-	return 0;
-
-err_wq:
-	destroy_workqueue(cmd->wq);
-
-err_cache:
-	destroy_msg_cache(dev);
-
-err_free_page:
-	free_cmd_page(dev, cmd);
-
-err_free_pool:
-	pci_pool_destroy(cmd->pool);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_cmd_init);
-
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd *cmd = &dev->cmd;
-
-	clean_debug_files(dev);
-	destroy_workqueue(cmd->wq);
-	destroy_msg_cache(dev);
-	free_cmd_page(dev, cmd);
-	pci_pool_destroy(cmd->pool);
-}
-EXPORT_SYMBOL(mlx5_cmd_cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
deleted file mode 100644
index 336d473..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include <rdma/ib_verbs.h>
-#include <linux/mlx5/cq.h>
-#include "mlx5_core.h"
-
-#define TASKLET_MAX_TIME 2
-#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-
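-/* Drain the EQ's list of CQs with pending completions, but yield after
- * TASKLET_MAX_TIME_JIFFIES and reschedule ourselves if work remains.
- */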
-void mlx5_cq_tasklet_cb(unsigned long data)
-{
-	unsigned long flags;
-	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
-	struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
-	struct mlx5_core_cq *mcq;
-	struct mlx5_core_cq *temp;
-
-	spin_lock_irqsave(&ctx->lock, flags);
-	list_splice_tail_init(&ctx->list, &ctx->process_list);
-	spin_unlock_irqrestore(&ctx->lock, flags);
-
-	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
-				 tasklet_ctx.list) {
-		list_del_init(&mcq->tasklet_ctx.list);
-		mcq->tasklet_ctx.comp(mcq);
-		if (atomic_dec_and_test(&mcq->refcount))
-			complete(&mcq->free);
-		if (time_after(jiffies, end))
-			break;
-	}
-
-	if (!list_empty(&ctx->process_list))
-		tasklet_schedule(&ctx->task);
-}
-
-static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
-{
-	unsigned long flags;
-	struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
-
-	spin_lock_irqsave(&tasklet_ctx->lock, flags);
-	/* When migration of CQs between EQs is implemented, note that
-	 * this point must be synchronized: while a CQ is being migrated,
-	 * completions may still arrive on the old EQ.
-	 */
-	if (list_empty_careful(&cq->tasklet_ctx.list)) {
-		atomic_inc(&cq->refcount);
-		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
-	}
-	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
-}
-
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
-{
-	struct mlx5_core_cq *cq;
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-
-	spin_lock(&table->lock);
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (likely(cq))
-		atomic_inc(&cq->refcount);
-	spin_unlock(&table->lock);
-
-	if (!cq) {
-		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	++cq->arm_sn;
-
-	cq->comp(cq);
-
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
-}
-
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	struct mlx5_core_cq *cq;
-
-	spin_lock(&table->lock);
-
-	cq = radix_tree_lookup(&table->tree, cqn);
-	if (cq)
-		atomic_inc(&cq->refcount);
-
-	spin_unlock(&table->lock);
-
-	if (!cq) {
-		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
-		return;
-	}
-
-	cq->event(cq, event_type);
-
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
-}
-
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
-	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
-	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
-	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
-			   c_eqn);
-	struct mlx5_eq *eq;
-	int err;
-
-	eq = mlx5_eqn2eq(dev, eqn);
-	if (IS_ERR(eq))
-		return PTR_ERR(eq);
-
-	memset(out, 0, sizeof(out));
-	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (err)
-		return err;
-
-	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
-	cq->cons_index = 0;
-	cq->arm_sn     = 0;
-	atomic_set(&cq->refcount, 1);
-	init_completion(&cq->free);
-	if (!cq->comp)
-		cq->comp = mlx5_add_cq_to_tasklet;
-	/* assuming CQ will be deleted before the EQ */
-	cq->tasklet_ctx.priv = &eq->tasklet_ctx;
-	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
-
-	spin_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, cq->cqn, cq);
-	spin_unlock_irq(&table->lock);
-	if (err)
-		goto err_cmd;
-
-	cq->pid = current->pid;
-	err = mlx5_debug_cq_add(dev, cq);
-	if (err)
-		mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
-			      cq->cqn);
-
-	cq->uar = dev->priv.uar;
-
-	return 0;
-
-err_cmd:
-	memset(din, 0, sizeof(din));
-	memset(dout, 0, sizeof(dout));
-	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
-	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
-	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_cq);
-
-int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
-	struct mlx5_core_cq *tmp;
-	int err;
-
-	spin_lock_irq(&table->lock);
-	tmp = radix_tree_delete(&table->tree, cq->cqn);
-	spin_unlock_irq(&table->lock);
-	if (!tmp) {
-		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
-		return -EINVAL;
-	}
-	if (tmp != cq) {
-		mlx5_core_warn(dev, "corruption on cqn 0x%x\n", cq->cqn);
-		return -EINVAL;
-	}
-
-	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
-	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	synchronize_irq(cq->irqn);
-
-	mlx5_debug_cq_remove(dev, cq);
-	if (atomic_dec_and_test(&cq->refcount))
-		complete(&cq->free);
-	wait_for_completion(&cq->free);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_core_destroy_cq);
-
-int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
-
-	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
-	MLX5_SET(query_cq_in, in, cqn, cq->cqn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_query_cq);
-
-int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
-
-	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_modify_cq);
-
-int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
-				   struct mlx5_core_cq *cq,
-				   u16 cq_period,
-				   u16 cq_max_count)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
-	void *cqc;
-
-	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
-	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
-	MLX5_SET(cqc, cqc, cq_period, cq_period);
-	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
-	MLX5_SET(modify_cq_in, in,
-		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
-		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
-
-	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
-}
-EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
-
-int mlx5_init_cq_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cq_table *table = &dev->priv.cq_table;
-	int err;
-
-	memset(table, 0, sizeof(*table));
-	spin_lock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-	err = mlx5_cq_debugfs_init(dev);
-
-	return err;
-}
-
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
-{
-	mlx5_cq_debugfs_cleanup(dev);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
deleted file mode 100644
index e94a953..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ /dev/null
@@ -1,614 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/debugfs.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/cq.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-
-enum {
-	QP_PID,
-	QP_STATE,
-	QP_XPORT,
-	QP_MTU,
-	QP_N_RECV,
-	QP_RECV_SZ,
-	QP_N_SEND,
-	QP_LOG_PG_SZ,
-	QP_RQPN,
-};
-
-static char *qp_fields[] = {
-	[QP_PID]	= "pid",
-	[QP_STATE]	= "state",
-	[QP_XPORT]	= "transport",
-	[QP_MTU]	= "mtu",
-	[QP_N_RECV]	= "num_recv",
-	[QP_RECV_SZ]	= "rcv_wqe_sz",
-	[QP_N_SEND]	= "num_send",
-	[QP_LOG_PG_SZ]	= "log2_page_sz",
-	[QP_RQPN]	= "remote_qpn",
-};
-
-enum {
-	EQ_NUM_EQES,
-	EQ_INTR,
-	EQ_LOG_PG_SZ,
-};
-
-static char *eq_fields[] = {
-	[EQ_NUM_EQES]	= "num_eqes",
-	[EQ_INTR]	= "intr",
-	[EQ_LOG_PG_SZ]	= "log_page_size",
-};
-
-enum {
-	CQ_PID,
-	CQ_NUM_CQES,
-	CQ_LOG_PG_SZ,
-};
-
-static char *cq_fields[] = {
-	[CQ_PID]	= "pid",
-	[CQ_NUM_CQES]	= "num_cqes",
-	[CQ_LOG_PG_SZ]	= "log_page_size",
-};
-
-struct dentry *mlx5_debugfs_root;
-EXPORT_SYMBOL(mlx5_debugfs_root);
-
-void mlx5_register_debugfs(void)
-{
-	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
-	if (IS_ERR_OR_NULL(mlx5_debugfs_root))
-		mlx5_debugfs_root = NULL;
-}
-
-void mlx5_unregister_debugfs(void)
-{
-	debugfs_remove(mlx5_debugfs_root);
-}
-
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	atomic_set(&dev->num_qps, 0);
-
-	dev->priv.qp_debugfs = debugfs_create_dir("QPs",  dev->priv.dbg_root);
-	if (!dev->priv.qp_debugfs)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	debugfs_remove_recursive(dev->priv.qp_debugfs);
-}
-
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	dev->priv.eq_debugfs = debugfs_create_dir("EQs",  dev->priv.dbg_root);
-	if (!dev->priv.eq_debugfs)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	debugfs_remove_recursive(dev->priv.eq_debugfs);
-}
-
-static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
-			    loff_t *pos)
-{
-	struct mlx5_cmd_stats *stats;
-	u64 field = 0;
-	int ret;
-	char tbuf[22];
-
-	if (*pos)
-		return 0;
-
-	stats = filp->private_data;
-	spin_lock_irq(&stats->lock);
-	if (stats->n)
-		field = div64_u64(stats->sum, stats->n);
-	spin_unlock_irq(&stats->lock);
-	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
-	if (ret > 0) {
-		if (copy_to_user(buf, tbuf, ret))
-			return -EFAULT;
-	}
-
-	*pos += ret;
-	return ret;
-}
-
-static ssize_t average_write(struct file *filp, const char __user *buf,
-			     size_t count, loff_t *pos)
-{
-	struct mlx5_cmd_stats *stats;
-
-	stats = filp->private_data;
-	spin_lock_irq(&stats->lock);
-	stats->sum = 0;
-	stats->n = 0;
-	spin_unlock_irq(&stats->lock);
-
-	*pos += count;
-
-	return count;
-}
-
-static const struct file_operations stats_fops = {
-	.owner	= THIS_MODULE,
-	.open	= simple_open,
-	.read	= average_read,
-	.write	= average_write,
-};
-
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
-{
-	struct mlx5_cmd_stats *stats;
-	struct dentry **cmd;
-	const char *namep;
-	int err;
-	int i;
-
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	cmd = &dev->priv.cmdif_debugfs;
-	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
-	if (!*cmd)
-		return -ENOMEM;
-
-	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
-		stats = &dev->cmd.stats[i];
-		namep = mlx5_command_str(i);
-		if (strcmp(namep, "unknown command opcode")) {
-			stats->root = debugfs_create_dir(namep, *cmd);
-			if (!stats->root) {
-				mlx5_core_warn(dev, "failed adding command %d\n",
-					       i);
-				err = -ENOMEM;
-				goto out;
-			}
-
-			stats->avg = debugfs_create_file("average", 0400,
-							 stats->root, stats,
-							 &stats_fops);
-			if (!stats->avg) {
-				mlx5_core_warn(dev, "failed creating debugfs file\n");
-				err = -ENOMEM;
-				goto out;
-			}
-
-			stats->count = debugfs_create_u64("n", 0400,
-							  stats->root,
-							  &stats->n);
-			if (!stats->count) {
-				mlx5_core_warn(dev, "failed creating debugfs file\n");
-				err = -ENOMEM;
-				goto out;
-			}
-		}
-	}
-
-	return 0;
-out:
-	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
-	return err;
-}
-
-void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
-}
-
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	dev->priv.cq_debugfs = debugfs_create_dir("CQs",  dev->priv.dbg_root);
-	if (!dev->priv.cq_debugfs)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	debugfs_remove_recursive(dev->priv.cq_debugfs);
-}
-
-static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-			 int index, int *is_str)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
-	struct mlx5_qp_context *ctx;
-	u64 param = 0;
-	u32 *out;
-	int err;
-	int no_sq;
-
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return param;
-
-	err = mlx5_core_qp_query(dev, qp, out, outlen);
-	if (err) {
-		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
-		goto out;
-	}
-
-	*is_str = 0;
-
-	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
-	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
-
-	switch (index) {
-	case QP_PID:
-		param = qp->pid;
-		break;
-	case QP_STATE:
-		param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
-		*is_str = 1;
-		break;
-	case QP_XPORT:
-		param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
-		*is_str = 1;
-		break;
-	case QP_MTU:
-		switch (ctx->mtu_msgmax >> 5) {
-		case IB_MTU_256:
-			param = 256;
-			break;
-		case IB_MTU_512:
-			param = 512;
-			break;
-		case IB_MTU_1024:
-			param = 1024;
-			break;
-		case IB_MTU_2048:
-			param = 2048;
-			break;
-		case IB_MTU_4096:
-			param = 4096;
-			break;
-		default:
-			param = 0;
-		}
-		break;
-	case QP_N_RECV:
-		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
-		break;
-	case QP_RECV_SZ:
-		param = 1 << ((ctx->rq_size_stride & 7) + 4);
-		break;
-	case QP_N_SEND:
-		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
-		if (!no_sq)
-			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
-		else
-			param = 0;
-		break;
-	case QP_LOG_PG_SZ:
-		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
-		param += 12;
-		break;
-	case QP_RQPN:
-		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
-		break;
-	}
-
-out:
-	kfree(out);
-	return param;
-}
-
-static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-			 int index)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
-	u64 param = 0;
-	void *ctx;
-	u32 *out;
-	int err;
-
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return param;
-
-	err = mlx5_core_eq_query(dev, eq, out, outlen);
-	if (err) {
-		mlx5_core_warn(dev, "failed to query eq\n");
-		goto out;
-	}
-	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
-
-	switch (index) {
-	case EQ_NUM_EQES:
-		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
-		break;
-	case EQ_INTR:
-		param = MLX5_GET(eqc, ctx, intr);
-		break;
-	case EQ_LOG_PG_SZ:
-		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
-		break;
-	}
-
-out:
-	kfree(out);
-	return param;
-}
-
-static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			 int index)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
-	u64 param = 0;
-	void *ctx;
-	u32 *out;
-	int err;
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return param;
-
-	err = mlx5_core_query_cq(dev, cq, out, outlen);
-	if (err) {
-		mlx5_core_warn(dev, "failed to query cq\n");
-		goto out;
-	}
-	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
-
-	switch (index) {
-	case CQ_PID:
-		param = cq->pid;
-		break;
-	case CQ_NUM_CQES:
-		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
-		break;
-	case CQ_LOG_PG_SZ:
-		param = MLX5_GET(cqc, ctx, log_page_size);
-		break;
-	}
-
-out:
-	kvfree(out);
-	return param;
-}
-
-static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
-			loff_t *pos)
-{
-	struct mlx5_field_desc *desc;
-	struct mlx5_rsc_debug *d;
-	char tbuf[18];
-	int is_str = 0;
-	u64 field;
-	int ret;
-
-	if (*pos)
-		return 0;
-
-	desc = filp->private_data;
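-	/* fields[] is a trailing array in struct mlx5_rsc_debug; step back
-	 * desc->i entries plus the struct header to recover the container.
-	 */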
-	d = (void *)(desc - desc->i) - sizeof(*d);
-	switch (d->type) {
-	case MLX5_DBG_RSC_QP:
-		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
-		break;
-
-	case MLX5_DBG_RSC_EQ:
-		field = eq_read_field(d->dev, d->object, desc->i);
-		break;
-
-	case MLX5_DBG_RSC_CQ:
-		field = cq_read_field(d->dev, d->object, desc->i);
-		break;
-
-	default:
-		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
-		return -EINVAL;
-	}
-
-	if (is_str)
-		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
-	else
-		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
-
-	if (ret > 0) {
-		if (copy_to_user(buf, tbuf, ret))
-			return -EFAULT;
-	}
-
-	*pos += ret;
-	return ret;
-}
-
-static const struct file_operations fops = {
-	.owner	= THIS_MODULE,
-	.open	= simple_open,
-	.read	= dbg_read,
-};
-
-static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
-			struct dentry *root, struct mlx5_rsc_debug **dbg,
-			int rsn, char **field, int nfile, void *data)
-{
-	struct mlx5_rsc_debug *d;
-	char resn[32];
-	int err;
-	int i;
-
-	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
-	if (!d)
-		return -ENOMEM;
-
-	d->dev = dev;
-	d->object = data;
-	d->type = type;
-	sprintf(resn, "0x%x", rsn);
-	d->root = debugfs_create_dir(resn,  root);
-	if (!d->root) {
-		err = -ENOMEM;
-		goto out_free;
-	}
-
-	for (i = 0; i < nfile; i++) {
-		d->fields[i].i = i;
-		d->fields[i].dent = debugfs_create_file(field[i], 0400,
-							d->root, &d->fields[i],
-							&fops);
-		if (!d->fields[i].dent) {
-			err = -ENOMEM;
-			goto out_rem;
-		}
-	}
-	*dbg = d;
-
-	return 0;
-out_rem:
-	debugfs_remove_recursive(d->root);
-
-out_free:
-	kfree(d);
-	return err;
-}
-
-static void rem_res_tree(struct mlx5_rsc_debug *d)
-{
-	debugfs_remove_recursive(d->root);
-	kfree(d);
-}
-
-int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
-{
-	int err;
-
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
-			   &qp->dbg, qp->qpn, qp_fields,
-			   ARRAY_SIZE(qp_fields), qp);
-	if (err)
-		qp->dbg = NULL;
-
-	return err;
-}
-
-void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	if (qp->dbg)
-		rem_res_tree(qp->dbg);
-}
-
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
-{
-	int err;
-
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
-			   &eq->dbg, eq->eqn, eq_fields,
-			   ARRAY_SIZE(eq_fields), eq);
-	if (err)
-		eq->dbg = NULL;
-
-	return err;
-}
-
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	if (eq->dbg)
-		rem_res_tree(eq->dbg);
-}
-
-int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
-{
-	int err;
-
-	if (!mlx5_debugfs_root)
-		return 0;
-
-	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
-			   &cq->dbg, cq->cqn, cq_fields,
-			   ARRAY_SIZE(cq_fields), cq);
-	if (err)
-		cq->dbg = NULL;
-
-	return err;
-}
-
-void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	if (cq->dbg)
-		rem_res_tree(cq->dbg);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
deleted file mode 100644
index a62f4b6..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-
-static LIST_HEAD(intf_list);
-static LIST_HEAD(mlx5_dev_list);
-/* intf dev list mutex */
-static DEFINE_MUTEX(mlx5_intf_mutex);
-
-struct mlx5_device_context {
-	struct list_head	list;
-	struct mlx5_interface  *intf;
-	void		       *context;
-	unsigned long		state;
-};
-
-enum {
-	MLX5_INTERFACE_ADDED,
-	MLX5_INTERFACE_ATTACHED,
-};
-
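-/* Instantiate an interface on a device. On success the context joins
- * priv->ctx_list; with on-demand paging enabled, the first interface
- * providing a ->pfault handler becomes the device's page fault handler.
- */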
-void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	if (!mlx5_lag_intf_add(intf, priv))
-		return;
-
-	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
-	if (!dev_ctx)
-		return;
-
-	dev_ctx->intf = intf;
-	dev_ctx->context = intf->add(dev);
-	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-	if (intf->attach)
-		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
-	if (dev_ctx->context) {
-		spin_lock_irq(&priv->ctx_lock);
-		list_add_tail(&dev_ctx->list, &priv->ctx_list);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		if (dev_ctx->intf->pfault) {
-			if (priv->pfault) {
-				mlx5_core_err(dev, "multiple page fault handlers not supported\n");
-			} else {
-				priv->pfault_ctx = dev_ctx->context;
-				priv->pfault = dev_ctx->intf->pfault;
-			}
-		}
-#endif
-		spin_unlock_irq(&priv->ctx_lock);
-	} else {
-		kfree(dev_ctx);
-	}
-}
-
-static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
-						   struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-
-	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf == intf)
-			return dev_ctx;
-	return NULL;
-}
-
-void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	dev_ctx = mlx5_get_device(intf, priv);
-	if (!dev_ctx)
-		return;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	spin_lock_irq(&priv->ctx_lock);
-	if (priv->pfault == dev_ctx->intf->pfault)
-		priv->pfault = NULL;
-	spin_unlock_irq(&priv->ctx_lock);
-
-	synchronize_srcu(&priv->pfault_srcu);
-#endif
-
-	spin_lock_irq(&priv->ctx_lock);
-	list_del(&dev_ctx->list);
-	spin_unlock_irq(&priv->ctx_lock);
-
-	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-		intf->remove(dev, dev_ctx->context);
-
-	kfree(dev_ctx);
-}
-
-static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	dev_ctx = mlx5_get_device(intf, priv);
-	if (!dev_ctx)
-		return;
-
-	if (intf->attach) {
-		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-			return;
-		intf->attach(dev, dev_ctx->context);
-		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-	} else {
-		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-			return;
-		dev_ctx->context = intf->add(dev);
-		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-	}
-}
-
-void mlx5_attach_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_attach_interface(intf, priv);
-	mutex_unlock(&mlx5_intf_mutex);
-}
-
-static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_device_context *dev_ctx;
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	dev_ctx = mlx5_get_device(intf, priv);
-	if (!dev_ctx)
-		return;
-
-	if (intf->detach) {
-		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
-			return;
-		intf->detach(dev, dev_ctx->context);
-		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-	} else {
-		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
-			return;
-		intf->remove(dev, dev_ctx->context);
-		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-	}
-}
-
-void mlx5_detach_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_detach_interface(intf, priv);
-	mutex_unlock(&mlx5_intf_mutex);
-}
-
-bool mlx5_device_registered(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv;
-	bool found = false;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
-		if (priv == &dev->priv)
-			found = true;
-	mutex_unlock(&mlx5_intf_mutex);
-
-	return found;
-}
-
-int mlx5_register_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_add_tail(&priv->dev_list, &mlx5_dev_list);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_add_device(intf, priv);
-	mutex_unlock(&mlx5_intf_mutex);
-
-	return 0;
-}
-
-void mlx5_unregister_device(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_interface *intf;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(intf, &intf_list, list)
-		mlx5_remove_device(intf, priv);
-	list_del(&priv->dev_list);
-	mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_register_interface(struct mlx5_interface *intf)
-{
-	struct mlx5_priv *priv;
-
-	if (!intf->add || !intf->remove)
-		return -EINVAL;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_add_tail(&intf->list, &intf_list);
-	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
-		mlx5_add_device(intf, priv);
-	mutex_unlock(&mlx5_intf_mutex);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_register_interface);
-
-void mlx5_unregister_interface(struct mlx5_interface *intf)
-{
-	struct mlx5_priv *priv;
-
-	mutex_lock(&mlx5_intf_mutex);
-	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
-		mlx5_remove_device(intf, priv);
-	list_del(&intf->list);
-	mutex_unlock(&mlx5_intf_mutex);
-}
-EXPORT_SYMBOL(mlx5_unregister_interface);
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
-{
-	struct mlx5_priv *priv = &mdev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-	void *result = NULL;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
-		if ((dev_ctx->intf->protocol == protocol) &&
-		    dev_ctx->intf->get_dev) {
-			result = dev_ctx->intf->get_dev(dev_ctx->context);
-			break;
-		}
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL(mlx5_get_protocol_dev);
-
-/* Must be called with intf_mutex held */
-void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
-{
-	struct mlx5_interface *intf;
-
-	list_for_each_entry(intf, &intf_list, list)
-		if (intf->protocol == protocol) {
-			mlx5_add_device(intf, &dev->priv);
-			break;
-		}
-}
-
-/* Must be called with intf_mutex held */
-void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
-{
-	struct mlx5_interface *intf;
-
-	list_for_each_entry(intf, &intf_list, list)
-		if (intf->protocol == protocol) {
-			mlx5_remove_device(intf, &dev->priv);
-			break;
-		}
-}
-
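-/* Encode the PCI bus and slot so that functions of the same physical
- * device compare equal; see mlx5_get_next_phys_dev() below.
- */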
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
-{
-	return (u16)((dev->pdev->bus->number << 8) |
-		     PCI_SLOT(dev->pdev->devfn));
-}
-
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
-{
-	u16 pci_id = mlx5_gen_pci_id(dev);
-	struct mlx5_core_dev *res = NULL;
-	struct mlx5_core_dev *tmp_dev;
-	struct mlx5_priv *priv;
-
-	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
-		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
-		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
-			res = tmp_dev;
-			break;
-		}
-	}
-
-	return res;
-}
-
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-		     unsigned long param)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_device_context *dev_ctx;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->ctx_lock, flags);
-
-	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
-		if (dev_ctx->intf->event)
-			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
-
-	spin_unlock_irqrestore(&priv->ctx_lock, flags);
-}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
-			  struct mlx5_pagefault *pfault)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	int srcu_idx;
-
-	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
-	if (priv->pfault)
-		priv->pfault(dev, priv->pfault_ctx, pfault);
-	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
-}
-#endif
-
-void mlx5_dev_list_lock(void)
-{
-	mutex_lock(&mlx5_intf_mutex);
-}
-
-void mlx5_dev_list_unlock(void)
-{
-	mutex_unlock(&mlx5_intf_mutex);
-}
-
-int mlx5_dev_list_trylock(void)
-{
-	return mutex_trylock(&mlx5_intf_mutex);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
deleted file mode 100644
index a473cea..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __MLX5_EN_H__
-#define __MLX5_EN_H__
-
-#include <linux/if_vlan.h>
-#include <linux/etherdevice.h>
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/cq.h>
-#include <linux/mlx5/port.h>
-#include <linux/mlx5/vport.h>
-#include <linux/mlx5/transobj.h>
-#include <linux/rhashtable.h>
-#include <net/switchdev.h>
-#include "wq.h"
-#include "mlx5_core.h"
-#include "en_stats.h"
-
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-
-#define MLX5E_MAX_NUM_TC	8
-
-#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
-#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
-
-#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
-#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
-#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
-
-#define MLX5_RX_HEADROOM NET_SKB_PAD
-
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE		6  /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS	8  /* >= 6, HW restriction */
-#define MLX5_MPWRQ_LOG_WQE_SZ			18
-#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
-				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
-#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
-						 MLX5_MPWRQ_WQE_PAGE_ORDER)
-
-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
-#define MLX5E_REQUIRED_MTTS(wqes)		\
-	(wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
-#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
-
-#define MLX5_UMR_ALIGN				(2048)
-#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
-
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
-#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
-#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
-
-#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
-#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
-#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
-#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
-#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
-#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
-#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
-
-#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
-#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
-#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
-#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
-#define MLX5E_TX_CQ_POLL_BUDGET        128
-#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
-#define MLX5E_SQ_BF_BUDGET             16
-
-#define MLX5E_ICOSQ_MAX_WQEBBS \
-	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
-
-#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_IHS_DS_COUNT \
-	DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
-#define MLX5E_XDP_TX_DS_COUNT \
-	(MLX5E_XDP_IHS_DS_COUNT + \
-	 (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
-#define MLX5E_XDP_TX_WQEBBS \
-	DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
-
-#define MLX5E_NUM_MAIN_GROUPS 9
-
-static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
-			     wq_size / 2);
-	default:
-		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
-			     wq_size / 2);
-	}
-}
-
-static inline int mlx5_min_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-	}
-}
-
-static inline int mlx5_max_log_rq_size(int wq_type)
-{
-	switch (wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
-	default:
-		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
-	}
-}
-
-struct mlx5e_tx_wqe {
-	struct mlx5_wqe_ctrl_seg ctrl;
-	struct mlx5_wqe_eth_seg  eth;
-};
-
-struct mlx5e_rx_wqe {
-	struct mlx5_wqe_srq_next_seg  next;
-	struct mlx5_wqe_data_seg      data;
-};
-
-struct mlx5e_umr_wqe {
-	struct mlx5_wqe_ctrl_seg       ctrl;
-	struct mlx5_wqe_umr_ctrl_seg   uctrl;
-	struct mlx5_mkey_seg           mkc;
-	struct mlx5_wqe_data_seg       data;
-};
-
-extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
-	"rx_cqe_moder",
-	"rx_cqe_compress",
-};
-
-enum mlx5e_priv_flag {
-	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
-	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
-};
-
-#define MLX5E_SET_PFLAG(priv, pflag, enable)			\
-	do {							\
-		if (enable)					\
-			(priv)->params.pflags |= (pflag);	\
-		else						\
-			(priv)->params.pflags &= ~(pflag);	\
-	} while (0)
-
-#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#endif
-
-struct mlx5e_cq_moder {
-	u16 usec;
-	u16 pkts;
-};
-
-struct mlx5e_params {
-	u8  log_sq_size;
-	u8  rq_wq_type;
-	u8  mpwqe_log_stride_sz;
-	u8  mpwqe_log_num_strides;
-	u8  log_rq_size;
-	u16 num_channels;
-	u8  num_tc;
-	u8  rx_cq_period_mode;
-	bool rx_cqe_compress_def;
-	struct mlx5e_cq_moder rx_cq_moderation;
-	struct mlx5e_cq_moder tx_cq_moderation;
-	u16 min_rx_wqes;
-	bool lro_en;
-	u32 lro_wqe_sz;
-	u16 tx_max_inline;
-	u8  tx_min_inline_mode;
-	u8  rss_hfunc;
-	u8  toeplitz_hash_key[40];
-	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
-	bool vlan_strip_disable;
-	bool rx_am_enabled;
-	u32 lro_timeout;
-	u32 pflags;
-};
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-struct mlx5e_cee_config {
-	/* bw pct for priority group */
-	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
-	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
-	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
-	bool                       pfc_enable;
-};
-
-enum {
-	MLX5_DCB_CHG_RESET,
-	MLX5_DCB_NO_CHG,
-	MLX5_DCB_CHG_NO_RESET,
-};
-
-struct mlx5e_dcbx {
-	enum mlx5_dcbx_oper_mode   mode;
-	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
-
-	/* The only setting that cannot be read from FW */
-	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
-};
-#endif
-
-struct mlx5e_tstamp {
-	rwlock_t                   lock;
-	struct cyclecounter        cycles;
-	struct timecounter         clock;
-	struct hwtstamp_config     hwtstamp_config;
-	u32                        nominal_c_mult;
-	unsigned long              overflow_period;
-	struct delayed_work        overflow_work;
-	struct mlx5_core_dev      *mdev;
-	struct ptp_clock          *ptp;
-	struct ptp_clock_info      ptp_info;
-};
-
-enum {
-	MLX5E_RQ_STATE_ENABLED,
-	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
-	MLX5E_RQ_STATE_AM,
-};
-
-struct mlx5e_cq {
-	/* data path - accessed per cqe */
-	struct mlx5_cqwq           wq;
-
-	/* data path - accessed per napi poll */
-	u16                        event_ctr;
-	struct napi_struct        *napi;
-	struct mlx5_core_cq        mcq;
-	struct mlx5e_channel      *channel;
-	struct mlx5e_priv         *priv;
-
-	/* cqe decompression */
-	struct mlx5_cqe64          title;
-	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
-	u8                         mini_arr_idx;
-	u16                        decmprs_left;
-	u16                        decmprs_wqe_counter;
-
-	/* control */
-	struct mlx5_frag_wq_ctrl   wq_ctrl;
-} ____cacheline_aligned_in_smp;
-
-struct mlx5e_rq;
-typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
-				       struct mlx5_cqe64 *cqe);
-typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
-				  u16 ix);
-
-typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
-
-struct mlx5e_dma_info {
-	struct page	*page;
-	dma_addr_t	addr;
-};
-
-struct mlx5e_rx_am_stats {
-	int ppms; /* packets per msec */
-	int epms; /* events per msec */
-};
-
-struct mlx5e_rx_am_sample {
-	ktime_t		time;
-	unsigned int	pkt_ctr;
-	u16		event_ctr;
-};
-
-struct mlx5e_rx_am { /* Adaptive Moderation */
-	u8					state;
-	struct mlx5e_rx_am_stats		prev_stats;
-	struct mlx5e_rx_am_sample		start_sample;
-	struct work_struct			work;
-	u8					profile_ix;
-	u8					mode;
-	u8					tune_state;
-	u8					steps_right;
-	u8					steps_left;
-	u8					tired;
-};
-
-/* a single cache unit is capable of serving one napi call (for a
- * non-striding rq) or one MPWQE (for a striding rq).
- */
-#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
-				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
-#define MLX5E_CACHE_SIZE	(2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
-struct mlx5e_page_cache {
-	u32 head;
-	u32 tail;
-	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
-};
-
-struct mlx5e_rq {
-	/* data path */
-	struct mlx5_wq_ll      wq;
-
-	union {
-		struct mlx5e_dma_info *dma_info;
-		struct {
-			struct mlx5e_mpw_info *info;
-			void                  *mtt_no_align;
-		} mpwqe;
-	};
-	struct {
-		u8             page_order;
-		u32            wqe_sz;    /* wqe data buffer size */
-		u8             map_dir;   /* dma map direction */
-	} buff;
-	__be32                 mkey_be;
-
-	struct device         *pdev;
-	struct net_device     *netdev;
-	struct mlx5e_tstamp   *tstamp;
-	struct mlx5e_rq_stats  stats;
-	struct mlx5e_cq        cq;
-	struct mlx5e_page_cache page_cache;
-
-	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
-	mlx5e_fp_alloc_wqe     alloc_wqe;
-	mlx5e_fp_dealloc_wqe   dealloc_wqe;
-
-	unsigned long          state;
-	int                    ix;
-
-	struct mlx5e_rx_am     am; /* Adaptive Moderation */
-	struct bpf_prog       *xdp_prog;
-
-	/* control */
-	struct mlx5_wq_ctrl    wq_ctrl;
-	u8                     wq_type;
-	u32                    mpwqe_stride_sz;
-	u32                    mpwqe_num_strides;
-	u32                    rqn;
-	struct mlx5e_channel  *channel;
-	struct mlx5e_priv     *priv;
-	struct mlx5_core_mkey  umr_mkey;
-} ____cacheline_aligned_in_smp;
-
-struct mlx5e_umr_dma_info {
-	__be64                *mtt;
-	dma_addr_t             mtt_addr;
-	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
-	struct mlx5e_umr_wqe   wqe;
-};
-
-struct mlx5e_mpw_info {
-	struct mlx5e_umr_dma_info umr;
-	u16 consumed_strides;
-	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
-};
-
-struct mlx5e_tx_wqe_info {
-	u32 num_bytes;
-	u8  num_wqebbs;
-	u8  num_dma;
-};
-
-enum mlx5e_dma_map_type {
-	MLX5E_DMA_MAP_SINGLE,
-	MLX5E_DMA_MAP_PAGE
-};
-
-struct mlx5e_sq_dma {
-	dma_addr_t              addr;
-	u32                     size;
-	enum mlx5e_dma_map_type type;
-};
-
-enum {
-	MLX5E_SQ_STATE_ENABLED,
-	MLX5E_SQ_STATE_BF_ENABLE,
-};
-
-struct mlx5e_sq_wqe_info {
-	u8  opcode;
-	u8  num_wqebbs;
-};
-
-enum mlx5e_sq_type {
-	MLX5E_SQ_TXQ,
-	MLX5E_SQ_ICO,
-	MLX5E_SQ_XDP
-};
-
-struct mlx5e_sq {
-	/* data path */
-
-	/* dirtied @completion */
-	u16                        cc;
-	u32                        dma_fifo_cc;
-
-	/* dirtied @xmit */
-	u16                        pc ____cacheline_aligned_in_smp;
-	u32                        dma_fifo_pc;
-	u16                        bf_offset;
-	u16                        prev_cc;
-	u8                         bf_budget;
-	struct mlx5e_sq_stats      stats;
-
-	struct mlx5e_cq            cq;
-
-	/* pointers to per tx element info: write@xmit, read@completion */
-	union {
-		struct {
-			struct sk_buff           **skb;
-			struct mlx5e_sq_dma       *dma_fifo;
-			struct mlx5e_tx_wqe_info  *wqe_info;
-		} txq;
-		struct mlx5e_sq_wqe_info *ico_wqe;
-		struct {
-			struct mlx5e_sq_wqe_info  *wqe_info;
-			struct mlx5e_dma_info     *di;
-			bool                       doorbell;
-		} xdp;
-	} db;
-
-	/* read only */
-	struct mlx5_wq_cyc         wq;
-	u32                        dma_fifo_mask;
-	struct netdev_queue       *txq;
-	u32                        sqn;
-	u16                        bf_buf_size;
-	u16                        max_inline;
-	u8                         min_inline_mode;
-	u16                        edge;
-	struct device             *pdev;
-	struct mlx5e_tstamp       *tstamp;
-	__be32                     mkey_be;
-	unsigned long              state;
-
-	/* control path */
-	struct mlx5_wq_ctrl        wq_ctrl;
-	struct mlx5_sq_bfreg	   bfreg;
-	struct mlx5e_channel      *channel;
-	int                        tc;
-	u32                        rate_limit;
-	u8                         type;
-} ____cacheline_aligned_in_smp;
-
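-/* Free-slot check on the cyclic SQ: (cc - pc) masked to the ring size is
- * the number of free WQEBBs, except when the ring is completely empty
- * (cc == pc), which the second test covers.
- */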
-static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
-{
-	return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
-		(sq->cc  == sq->pc));
-}
-
-enum channel_flags {
-	MLX5E_CHANNEL_NAPI_SCHED = 1,
-};
-
-struct mlx5e_channel {
-	/* data path */
-	struct mlx5e_rq            rq;
-	struct mlx5e_sq            xdp_sq;
-	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
-	struct mlx5e_sq            icosq;   /* internal control operations */
-	bool                       xdp;
-	struct napi_struct         napi;
-	struct device             *pdev;
-	struct net_device         *netdev;
-	__be32                     mkey_be;
-	u8                         num_tc;
-	unsigned long              flags;
-
-	/* control */
-	struct mlx5e_priv         *priv;
-	int                        ix;
-	int                        cpu;
-};
-
-enum mlx5e_traffic_types {
-	MLX5E_TT_IPV4_TCP,
-	MLX5E_TT_IPV6_TCP,
-	MLX5E_TT_IPV4_UDP,
-	MLX5E_TT_IPV6_UDP,
-	MLX5E_TT_IPV4_IPSEC_AH,
-	MLX5E_TT_IPV6_IPSEC_AH,
-	MLX5E_TT_IPV4_IPSEC_ESP,
-	MLX5E_TT_IPV6_IPSEC_ESP,
-	MLX5E_TT_IPV4,
-	MLX5E_TT_IPV6,
-	MLX5E_TT_ANY,
-	MLX5E_NUM_TT,
-	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
-};
-
-enum {
-	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
-	MLX5E_STATE_OPENED,
-	MLX5E_STATE_DESTROYING,
-};
-
-struct mlx5e_vxlan_db {
-	spinlock_t			lock; /* protect vxlan table */
-	struct radix_tree_root		tree;
-};
-
-struct mlx5e_l2_rule {
-	u8  addr[ETH_ALEN + 2];
-	struct mlx5_flow_handle *rule;
-};
-
-struct mlx5e_flow_table {
-	int num_groups;
-	struct mlx5_flow_table *t;
-	struct mlx5_flow_group **g;
-};
-
-#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
-
-struct mlx5e_tc_table {
-	struct mlx5_flow_table		*t;
-
-	struct rhashtable_params        ht_params;
-	struct rhashtable               ht;
-};
-
-struct mlx5e_vlan_table {
-	struct mlx5e_flow_table		ft;
-	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-	struct mlx5_flow_handle	*active_vlans_rule[VLAN_N_VID];
-	struct mlx5_flow_handle	*untagged_rule;
-	struct mlx5_flow_handle	*any_vlan_rule;
-	bool		filter_disabled;
-};
-
-struct mlx5e_l2_table {
-	struct mlx5e_flow_table    ft;
-	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
-	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
-	struct mlx5e_l2_rule	   broadcast;
-	struct mlx5e_l2_rule	   allmulti;
-	struct mlx5e_l2_rule	   promisc;
-	bool                       broadcast_enabled;
-	bool                       allmulti_enabled;
-	bool                       promisc_enabled;
-};
-
-/* L3/L4 traffic type classifier */
-struct mlx5e_ttc_table {
-	struct mlx5e_flow_table  ft;
-	struct mlx5_flow_handle	 *rules[MLX5E_NUM_TT];
-};
-
-#define ARFS_HASH_SHIFT BITS_PER_BYTE
-#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
-struct arfs_table {
-	struct mlx5e_flow_table  ft;
-	struct mlx5_flow_handle	 *default_rule;
-	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
-};
-
-enum  arfs_type {
-	ARFS_IPV4_TCP,
-	ARFS_IPV6_TCP,
-	ARFS_IPV4_UDP,
-	ARFS_IPV6_UDP,
-	ARFS_NUM_TYPES,
-};
-
-struct mlx5e_arfs_tables {
-	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
-	/* Protect aRFS rules list */
-	spinlock_t                     arfs_lock;
-	struct list_head               rules;
-	int                            last_filter_id;
-	struct workqueue_struct        *wq;
-};
-
-/* NIC prio FTS */
-enum {
-	MLX5E_VLAN_FT_LEVEL = 0,
-	MLX5E_L2_FT_LEVEL,
-	MLX5E_TTC_FT_LEVEL,
-	MLX5E_ARFS_FT_LEVEL
-};
-
-struct mlx5e_ethtool_table {
-	struct mlx5_flow_table *ft;
-	int                    num_rules;
-};
-
-#define ETHTOOL_NUM_L3_L4_FTS 7
-#define ETHTOOL_NUM_L2_FTS 4
-
-struct mlx5e_ethtool_steering {
-	struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
-	struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
-	struct list_head                rules;
-	int                             tot_num_rules;
-};
-
-struct mlx5e_flow_steering {
-	struct mlx5_flow_namespace      *ns;
-	struct mlx5e_ethtool_steering   ethtool;
-	struct mlx5e_tc_table           tc;
-	struct mlx5e_vlan_table         vlan;
-	struct mlx5e_l2_table           l2;
-	struct mlx5e_ttc_table          ttc;
-	struct mlx5e_arfs_tables        arfs;
-};
-
-struct mlx5e_rqt {
-	u32              rqtn;
-	bool		 enabled;
-};
-
-struct mlx5e_tir {
-	u32		  tirn;
-	struct mlx5e_rqt  rqt;
-	struct list_head  list;
-};
-
-enum {
-	MLX5E_TC_PRIO = 0,
-	MLX5E_NIC_PRIO
-};
-
-struct mlx5e_profile {
-	void	(*init)(struct mlx5_core_dev *mdev,
-			struct net_device *netdev,
-			const struct mlx5e_profile *profile, void *ppriv);
-	void	(*cleanup)(struct mlx5e_priv *priv);
-	int	(*init_rx)(struct mlx5e_priv *priv);
-	void	(*cleanup_rx)(struct mlx5e_priv *priv);
-	int	(*init_tx)(struct mlx5e_priv *priv);
-	void	(*cleanup_tx)(struct mlx5e_priv *priv);
-	void	(*enable)(struct mlx5e_priv *priv);
-	void	(*disable)(struct mlx5e_priv *priv);
-	void	(*update_stats)(struct mlx5e_priv *priv);
-	int	(*max_nch)(struct mlx5_core_dev *mdev);
-	int	max_tc;
-};
-
-struct mlx5e_priv {
-	/* priv data path fields - start */
-	struct mlx5e_sq            **txq_to_sq_map;
-	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
-	struct bpf_prog *xdp_prog;
-	/* priv data path fields - end */
-
-	unsigned long              state;
-	struct mutex               state_lock; /* Protects Interface state */
-	struct mlx5e_rq            drop_rq;
-
-	struct mlx5e_channel     **channel;
-	u32                        tisn[MLX5E_MAX_NUM_TC];
-	struct mlx5e_rqt           indir_rqt;
-	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
-	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
-	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
-
-	struct mlx5e_flow_steering fs;
-	struct mlx5e_vxlan_db      vxlan;
-
-	struct mlx5e_params        params;
-	struct workqueue_struct    *wq;
-	struct work_struct         update_carrier_work;
-	struct work_struct         set_rx_mode_work;
-	struct work_struct         tx_timeout_work;
-	struct delayed_work        update_stats_work;
-
-	struct mlx5_core_dev      *mdev;
-	struct net_device         *netdev;
-	struct mlx5e_stats         stats;
-	struct mlx5e_tstamp        tstamp;
-	u16 q_counter;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-	struct mlx5e_dcbx          dcbx;
-#endif
-
-	const struct mlx5e_profile *profile;
-	void                      *ppriv;
-};
-
-void mlx5e_build_ptys2ethtool_map(void);
-
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv, select_queue_fallback_t fallback);
-netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-
-void mlx5e_completion_event(struct mlx5_core_cq *mcq);
-void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
-int mlx5e_napi_poll(struct napi_struct *napi, int budget);
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
-
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
-			bool recycle);
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
-void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
-
-void mlx5e_rx_am(struct mlx5e_rq *rq);
-void mlx5e_rx_am_work(struct work_struct *work);
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
-
-void mlx5e_update_stats(struct mlx5e_priv *priv);
-
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
-int mlx5e_self_test_num(struct mlx5e_priv *priv);
-void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
-		     u64 *buf);
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
-			   int location);
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
-				struct ethtool_rxnfc *info, u32 *rule_locs);
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
-			       struct ethtool_rx_flow_spec *fs);
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
-			      int location);
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-void mlx5e_set_rx_mode_work(struct work_struct *work);
-
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
-			struct skb_shared_hwtstamps *hwts);
-void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
-
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
-			  u16 vid);
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
-			   u16 vid);
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
-
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
-
-int mlx5e_open_locked(struct net_device *netdev);
-int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-				   u32 *indirection_rqt, int len,
-				   int num_channels);
-int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
-				 u8 cq_period_mode);
-
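-/* Ring the doorbell: write the producer counter to the doorbell record,
- * then either BlueFlame-copy the WQE (bf_sz != 0) or write the control
- * segment to the doorbell register.  bf_offset alternates between the two
- * halves of the BlueFlame buffer.
- */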
-static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
-				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
-{
-	u16 ofst = sq->bf_offset;
-
-	/* ensure wqe is visible to device before updating doorbell record */
-	dma_wmb();
-
-	*sq->wq.db = cpu_to_be32(sq->pc);
-
-	/* ensure doorbell record is visible to device before ringing the
-	 * doorbell
-	 */
-	wmb();
-	if (bf_sz)
-		__iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz);
-	else
-		mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL);
-	/* flush the write-combining mapped buffer */
-	wmb();
-
-	sq->bf_offset ^= sq->bf_buf_size;
-}
-
-static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
-{
-	struct mlx5_core_cq *mcq;
-
-	mcq = &cq->mcq;
-	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
-}
-
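-/* MTT offset of a striding-RQ WQE: each WQE owns ALIGN(PAGES_PER_WQE, 8)
- * MTT entries, matching the MLX5E_REQUIRED_MTTS() layout above.
- */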
-static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
-{
-	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
-}
-
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
-	return min_t(int, mdev->priv.eq_table.num_comp_vectors,
-		     MLX5E_MAX_NUM_CHANNELS);
-}
-
-extern const struct ethtool_ops mlx5e_ethtool_ops;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
-void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
-#endif
-
-#ifndef CONFIG_RFS_ACCEL
-static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
-{
-	return 0;
-}
-
-static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
-
-static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
-{
-	return -ENOTSUPP;
-}
-
-static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
-{
-	return -ENOTSUPP;
-}
-#else
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
-int mlx5e_arfs_enable(struct mlx5e_priv *priv);
-int mlx5e_arfs_disable(struct mlx5e_priv *priv);
-int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
-			u16 rxq_index, u32 flow_id);
-#endif
-
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
-		     struct mlx5e_tir *tir, u32 *in, int inlen);
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
-		       struct mlx5e_tir *tir);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
-void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-				     bool enable_uc_lb);
-
-struct mlx5_eswitch_rep;
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
-			 struct mlx5_eswitch_rep *rep);
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
-			    struct mlx5_eswitch_rep *rep);
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
-			  struct mlx5_eswitch_rep *rep);
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
-void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
-
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
-int mlx5e_create_tises(struct mlx5e_priv *priv);
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
-int mlx5e_close(struct net_device *netdev);
-int mlx5e_open(struct net_device *netdev);
-void mlx5e_update_stats_work(struct work_struct *work);
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
-				       const struct mlx5e_profile *profile,
-				       void *ppriv);
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti);
-
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
-			    void *sp);
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
-
-bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
-#endif /* __MLX5_EN_H__ */
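
The MLX5E_SET_PFLAG()/MLX5E_GET_PFLAG() helpers above are plain bitmask
operations on priv->params.pflags. A minimal usage sketch, assuming an
already-initialized struct mlx5e_priv *priv (the wrapper function name is
illustrative only, not part of the driver):

	static void pflag_sketch(struct mlx5e_priv *priv, bool enable)
	{
		/* set or clear the flag, then read it back */
		MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER, enable);
		WARN_ON(MLX5E_GET_PFLAG(priv,
					MLX5E_PFLAG_RX_CQE_BASED_MODER) != enable);
	}
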
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
deleted file mode 100644
index 68419a0..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ /dev/null
@@ -1,749 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifdef CONFIG_RFS_ACCEL
-
-#include <linux/hash.h>
-#include <linux/mlx5/fs.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include "en.h"
-
-struct arfs_tuple {
-	__be16 etype;
-	u8     ip_proto;
-	union {
-		__be32 src_ipv4;
-		struct in6_addr src_ipv6;
-	};
-	union {
-		__be32 dst_ipv4;
-		struct in6_addr dst_ipv6;
-	};
-	__be16 src_port;
-	__be16 dst_port;
-};
-
-struct arfs_rule {
-	struct mlx5e_priv	*priv;
-	struct work_struct      arfs_work;
-	struct mlx5_flow_handle *rule;
-	struct hlist_node	hlist;
-	int			rxq;
-	/* Flow ID passed to ndo_rx_flow_steer */
-	int			flow_id;
-	/* Filter ID returned by ndo_rx_flow_steer */
-	int			filter_id;
-	struct arfs_tuple	tuple;
-};
-
-#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
-	for (i = 0; i < ARFS_NUM_TYPES; i++) \
-		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
-
-#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
-	for (j = 0; j < ARFS_HASH_SIZE; j++) \
-		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
-
-static int arfs_get_tt(enum arfs_type type)
-{
-	switch (type) {
-	case ARFS_IPV4_TCP:
-		return MLX5E_TT_IPV4_TCP;
-	case ARFS_IPV4_UDP:
-		return MLX5E_TT_IPV4_UDP;
-	case ARFS_IPV6_TCP:
-		return MLX5E_TT_IPV6_TCP;
-	case ARFS_IPV6_UDP:
-		return MLX5E_TT_IPV6_UDP;
-	default:
-		return -EINVAL;
-	}
-}
-
-static int arfs_disable(struct mlx5e_priv *priv)
-{
-	struct mlx5_flow_destination dest;
-	struct mlx5e_tir *tir = priv->indir_tir;
-	int err = 0;
-	int tt;
-	int i;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		dest.tir_num = tir[i].tirn;
-		tt = arfs_get_tt(i);
-		/* Modify the ttc rules' destination to bypass the aRFS tables */
-		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-						   &dest, NULL);
-		if (err) {
-			netdev_err(priv->netdev,
-				   "%s: modify ttc destination failed\n",
-				   __func__);
-			return err;
-		}
-	}
-	return 0;
-}
-
-static void arfs_del_rules(struct mlx5e_priv *priv);
-
-int mlx5e_arfs_disable(struct mlx5e_priv *priv)
-{
-	arfs_del_rules(priv);
-
-	return arfs_disable(priv);
-}
-
-int mlx5e_arfs_enable(struct mlx5e_priv *priv)
-{
-	struct mlx5_flow_destination dest;
-	int err = 0;
-	int tt;
-	int i;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
-		tt = arfs_get_tt(i);
-		/* Modify the ttc rules' destination to point to the aRFS FTs */
-		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
-						   &dest, NULL);
-		if (err) {
-			netdev_err(priv->netdev,
-				   "%s: modify ttc destination failed err=%d\n",
-				   __func__, err);
-			arfs_disable(priv);
-			return err;
-		}
-	}
-	return 0;
-}
-
-static void arfs_destroy_table(struct arfs_table *arfs_t)
-{
-	mlx5_del_flow_rules(arfs_t->default_rule);
-	mlx5e_destroy_flow_table(&arfs_t->ft);
-}
-
-void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
-{
-	int i;
-
-	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
-		return;
-
-	arfs_del_rules(priv);
-	destroy_workqueue(priv->fs.arfs.wq);
-	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
-			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
-	}
-}
-
-static int arfs_add_default_rule(struct mlx5e_priv *priv,
-				 enum arfs_type type)
-{
-	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5_flow_destination dest;
-	struct mlx5e_tir *tir = priv->indir_tir;
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto out;
-	}
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	switch (type) {
-	case ARFS_IPV4_TCP:
-		dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
-		break;
-	case ARFS_IPV4_UDP:
-		dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
-		break;
-	case ARFS_IPV6_TCP:
-		dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
-		break;
-	case ARFS_IPV6_UDP:
-		dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
-		break;
-	default:
-		err = -EINVAL;
-		goto out;
-	}
-
-	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
-						   &flow_act,
-						   &dest, 1);
-	if (IS_ERR(arfs_t->default_rule)) {
-		err = PTR_ERR(arfs_t->default_rule);
-		arfs_t->default_rule = NULL;
-		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
-			   __func__, type);
-	}
-out:
-	kvfree(spec);
-	return err;
-}
-
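-/* Two flow groups per aRFS table: a large group for the per-flow 5-tuple
- * rules and a one-entry group for the catch-all default rule.
- */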
-#define MLX5E_ARFS_NUM_GROUPS	2
-#define MLX5E_ARFS_GROUP1_SIZE	BIT(12)
-#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
-#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
-				 MLX5E_ARFS_GROUP2_SIZE)
-static int arfs_create_groups(struct mlx5e_flow_table *ft,
-			      enum  arfs_type type)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	void *outer_headers_c;
-	int ix = 0;
-	u32 *in;
-	int err;
-	u8 *mc;
-
-	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
-			sizeof(*ft->g), GFP_KERNEL);
-	in = mlx5_vzalloc(inlen);
-	if  (!in || !ft->g) {
-		kvfree(ft->g);
-		kvfree(in);
-		return -ENOMEM;
-	}
-
-	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
-				       outer_headers);
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
-	switch (type) {
-	case ARFS_IPV4_TCP:
-	case ARFS_IPV6_TCP:
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
-		break;
-	case ARFS_IPV4_UDP:
-	case ARFS_IPV6_UDP:
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
-		break;
-	default:
-		err = -EINVAL;
-		goto out;
-	}
-
-	switch (type) {
-	case ARFS_IPV4_TCP:
-	case ARFS_IPV4_UDP:
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
-				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
-		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
-				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-		break;
-	case ARFS_IPV6_TCP:
-	case ARFS_IPV6_UDP:
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       0xff, 16);
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, 16);
-		break;
-	default:
-		err = -EINVAL;
-		goto out;
-	}
-
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_ARFS_GROUP1_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_ARFS_GROUP2_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err;
-	ft->num_groups++;
-
-	kvfree(in);
-	return 0;
-
-err:
-	err = PTR_ERR(ft->g[ft->num_groups]);
-	ft->g[ft->num_groups] = NULL;
-out:
-	kvfree(in);
-
-	return err;
-}
-
-static int arfs_create_table(struct mlx5e_priv *priv,
-			     enum arfs_type type)
-{
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
-	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
-	int err;
-
-	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
-				       MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
-	if (IS_ERR(ft->t)) {
-		err = PTR_ERR(ft->t);
-		ft->t = NULL;
-		return err;
-	}
-
-	err = arfs_create_groups(ft, type);
-	if (err)
-		goto err;
-
-	err = arfs_add_default_rule(priv, type);
-	if (err)
-		goto err;
-
-	return 0;
-err:
-	mlx5e_destroy_flow_table(ft);
-	return err;
-}
-
-int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
-{
-	int err = 0;
-	int i;
-
-	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
-		return 0;
-
-	spin_lock_init(&priv->fs.arfs.arfs_lock);
-	INIT_LIST_HEAD(&priv->fs.arfs.rules);
-	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
-	if (!priv->fs.arfs.wq)
-		return -ENOMEM;
-
-	for (i = 0; i < ARFS_NUM_TYPES; i++) {
-		err = arfs_create_table(priv, i);
-		if (err)
-			goto err;
-	}
-	return 0;
-err:
-	mlx5e_arfs_destroy_tables(priv);
-	return err;
-}
-
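-/* Bound on the number of rules examined per expiry pass, to limit the
- * time spent under arfs_lock.
- */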
-#define MLX5E_ARFS_EXPIRY_QUOTA 60
-
-static void arfs_may_expire_flow(struct mlx5e_priv *priv)
-{
-	struct arfs_rule *arfs_rule;
-	struct hlist_node *htmp;
-	int quota = 0;
-	int i;
-	int j;
-
-	HLIST_HEAD(del_list);
-	spin_lock_bh(&priv->fs.arfs.arfs_lock);
-	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
-		if (!work_pending(&arfs_rule->arfs_work) &&
-		    rps_may_expire_flow(priv->netdev,
-					arfs_rule->rxq, arfs_rule->flow_id,
-					arfs_rule->filter_id)) {
-			hlist_del_init(&arfs_rule->hlist);
-			hlist_add_head(&arfs_rule->hlist, &del_list);
-		}
-	}
-	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
-	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
-		if (arfs_rule->rule)
-			mlx5_del_flow_rules(arfs_rule->rule);
-		hlist_del(&arfs_rule->hlist);
-		kfree(arfs_rule);
-	}
-}
-
-static void arfs_del_rules(struct mlx5e_priv *priv)
-{
-	struct hlist_node *htmp;
-	struct arfs_rule *rule;
-	int i;
-	int j;
-
-	HLIST_HEAD(del_list);
-	spin_lock_bh(&priv->fs.arfs.arfs_lock);
-	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		hlist_del_init(&rule->hlist);
-		hlist_add_head(&rule->hlist, &del_list);
-	}
-	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
-
-	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
-		cancel_work_sync(&rule->arfs_work);
-		if (rule->rule)
-			mlx5_del_flow_rules(rule->rule);
-		hlist_del(&rule->hlist);
-		kfree(rule);
-	}
-}
-
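-/* Hash only the L4 ports; collisions are resolved by the full tuple
- * comparison in arfs_find_rule().
- */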
-static struct hlist_head *
-arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
-		 __be16 dst_port)
-{
-	unsigned long l;
-	int bucket_idx;
-
-	l = (__force unsigned long)src_port |
-	    ((__force unsigned long)dst_port << 2);
-
-	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
-
-	return &arfs_t->rules_hash[bucket_idx];
-}
-
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
-	return (skb->protocol == htons(ETH_P_IP)) ?
-		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
-static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
-					 u8 ip_proto, __be16 etype)
-{
-	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
-		return &arfs->arfs_tables[ARFS_IPV4_TCP];
-	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
-		return &arfs->arfs_tables[ARFS_IPV4_UDP];
-	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
-		return &arfs->arfs_tables[ARFS_IPV6_TCP];
-	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
-		return &arfs->arfs_tables[ARFS_IPV6_UDP];
-
-	return NULL;
-}
-
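-/* Build a 5-tuple match spec from the rule's tuple and add a steering
- * rule forwarding the flow to the direct TIR of its RX queue.
- */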
-static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
-					      struct arfs_rule *arfs_rule)
-{
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
-	struct arfs_tuple *tuple = &arfs_rule->tuple;
-	struct mlx5_flow_handle *rule = NULL;
-	struct mlx5_flow_destination dest;
-	struct arfs_table *arfs_table;
-	struct mlx5_flow_spec *spec;
-	struct mlx5_flow_table *ft;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		err = -ENOMEM;
-		goto out;
-	}
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-			 outer_headers.ethertype);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
-		 ntohs(tuple->etype));
-	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
-	if (!arfs_table) {
-		err = -EINVAL;
-		goto out;
-	}
-
-	ft = arfs_table->ft.t;
-	if (tuple->ip_proto == IPPROTO_TCP) {
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.tcp_dport);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.tcp_sport);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
-			 ntohs(tuple->dst_port));
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
-			 ntohs(tuple->src_port));
-	} else {
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.udp_dport);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.udp_sport);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
-			 ntohs(tuple->dst_port));
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
-			 ntohs(tuple->src_port));
-	}
-	if (tuple->etype == htons(ETH_P_IP)) {
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       &tuple->src_ipv4,
-		       4);
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       &tuple->dst_ipv4,
-		       4);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-	} else {
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       &tuple->src_ipv6,
-		       16);
-		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &tuple->dst_ipv6,
-		       16);
-		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       0xff,
-		       16);
-		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff,
-		       16);
-	}
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
-			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
-	}
-
-out:
-	kvfree(spec);
-	return err ? ERR_PTR(err) : rule;
-}
-
-static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
-				struct mlx5_flow_handle *rule, u16 rxq)
-{
-	struct mlx5_flow_destination dst;
-	int err = 0;
-
-	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	dst.tir_num = priv->direct_tir[rxq].tirn;
-	err = mlx5_modify_rule_destination(rule, &dst, NULL);
-	if (err)
-		netdev_warn(priv->netdev,
-			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
-}
-
-static void arfs_handle_work(struct work_struct *work)
-{
-	struct arfs_rule *arfs_rule = container_of(work,
-						   struct arfs_rule,
-						   arfs_work);
-	struct mlx5e_priv *priv = arfs_rule->priv;
-	struct mlx5_flow_handle *rule;
-
-	mutex_lock(&priv->state_lock);
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		spin_lock_bh(&priv->fs.arfs.arfs_lock);
-		hlist_del(&arfs_rule->hlist);
-		spin_unlock_bh(&priv->fs.arfs.arfs_lock);
-
-		mutex_unlock(&priv->state_lock);
-		kfree(arfs_rule);
-		goto out;
-	}
-	mutex_unlock(&priv->state_lock);
-
-	if (!arfs_rule->rule) {
-		rule = arfs_add_rule(priv, arfs_rule);
-		if (IS_ERR(rule))
-			goto out;
-		arfs_rule->rule = rule;
-	} else {
-		arfs_modify_rule_rq(priv, arfs_rule->rule,
-				    arfs_rule->rxq);
-	}
-out:
-	arfs_may_expire_flow(priv);
-}
-
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->dest;
-	return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
-	char *transport_header;
-
-	transport_header = skb_transport_header(skb);
-	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
-		return ((struct tcphdr *)transport_header)->source;
-	return ((struct udphdr *)transport_header)->source;
-}
-
-static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
-					 struct arfs_table *arfs_t,
-					 const struct sk_buff *skb,
-					 u16 rxq, u32 flow_id)
-{
-	struct arfs_rule *rule;
-	struct arfs_tuple *tuple;
-
-	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
-	if (!rule)
-		return NULL;
-
-	rule->priv = priv;
-	rule->rxq = rxq;
-	INIT_WORK(&rule->arfs_work, arfs_handle_work);
-
-	tuple = &rule->tuple;
-	tuple->etype = skb->protocol;
-	if (tuple->etype == htons(ETH_P_IP)) {
-		tuple->src_ipv4 = ip_hdr(skb)->saddr;
-		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
-	} else {
-		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		       sizeof(struct in6_addr));
-		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		       sizeof(struct in6_addr));
-	}
-	tuple->ip_proto = arfs_get_ip_proto(skb);
-	tuple->src_port = arfs_get_src_port(skb);
-	tuple->dst_port = arfs_get_dst_port(skb);
-
-	rule->flow_id = flow_id;
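-	/* RPS_NO_FILTER is reserved; keep allocated filter ids below it */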
-	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
-
-	hlist_add_head(&rule->hlist,
-		       arfs_hash_bucket(arfs_t, tuple->src_port,
-					tuple->dst_port));
-	return rule;
-}
-
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
-			 const struct sk_buff *skb)
-{
-	if (tuple->etype == htons(ETH_P_IP) &&
-	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
-	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
-		return true;
-	if (tuple->etype == htons(ETH_P_IPV6) &&
-	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
-		     sizeof(struct in6_addr))) &&
-	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
-		     sizeof(struct in6_addr))))
-		return true;
-	return false;
-}
-
-static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
-					const struct sk_buff *skb)
-{
-	struct arfs_rule *arfs_rule;
-	struct hlist_head *head;
-	__be16 src_port = arfs_get_src_port(skb);
-	__be16 dst_port = arfs_get_dst_port(skb);
-
-	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
-	hlist_for_each_entry(arfs_rule, head, hlist) {
-		if (arfs_rule->tuple.src_port == src_port &&
-		    arfs_rule->tuple.dst_port == dst_port &&
-		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
-			return arfs_rule;
-		}
-	}
-
-	return NULL;
-}
-
-int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
-			u16 rxq_index, u32 flow_id)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
-	struct arfs_table *arfs_t;
-	struct arfs_rule *arfs_rule;
-
-	if (skb->protocol != htons(ETH_P_IP) &&
-	    skb->protocol != htons(ETH_P_IPV6))
-		return -EPROTONOSUPPORT;
-
-	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
-	if (!arfs_t)
-		return -EPROTONOSUPPORT;
-
-	spin_lock_bh(&arfs->arfs_lock);
-	arfs_rule = arfs_find_rule(arfs_t, skb);
-	if (arfs_rule) {
-		if (arfs_rule->rxq == rxq_index) {
-			spin_unlock_bh(&arfs->arfs_lock);
-			return arfs_rule->filter_id;
-		}
-		arfs_rule->rxq = rxq_index;
-	} else {
-		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
-					    rxq_index, flow_id);
-		if (!arfs_rule) {
-			spin_unlock_bh(&arfs->arfs_lock);
-			return -ENOMEM;
-		}
-	}
-	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
-	spin_unlock_bh(&arfs->arfs_lock);
-	return arfs_rule->filter_id;
-}
-#endif
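
mlx5e_rx_flow_steer() above is the driver's aRFS entry point: the stack
invokes it via net_device_ops when RFS acceleration wants to steer a flow
to a new RX queue. A sketch of the expected hookup (the real ops table
lives elsewhere in the driver, in en_main.c; only the .ndo_rx_flow_steer
member name is defined by the netdev API):

	static const struct net_device_ops sketch_netdev_ops = {
	#ifdef CONFIG_RFS_ACCEL
		.ndo_rx_flow_steer	= mlx5e_rx_flow_steer,
	#endif
		/* ... remaining callbacks elided ... */
	};
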
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
deleted file mode 100644
index 746a92c..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/clocksource.h>
-#include "en.h"
-
-enum {
-	MLX5E_CYCLES_SHIFT	= 23
-};
-
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
-			struct skb_shared_hwtstamps *hwts)
-{
-	u64 nsec;
-
-	read_lock(&tstamp->lock);
-	nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
-	read_unlock(&tstamp->lock);
-
-	hwts->hwtstamp = ns_to_ktime(nsec);
-}
-
-static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
-{
-	struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
-						   cycles);
-
-	return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
-}
-
-static void mlx5e_timestamp_overflow(struct work_struct *work)
-{
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
-						   overflow_work);
-	unsigned long flags;
-
-	write_lock_irqsave(&tstamp->lock, flags);
-	timecounter_read(&tstamp->clock);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
-}
-
-int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct hwtstamp_config config;
-
-	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
-		return -EOPNOTSUPP;
-
-	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
-		return -EFAULT;
-
-	/* TX HW timestamp */
-	switch (config.tx_type) {
-	case HWTSTAMP_TX_OFF:
-	case HWTSTAMP_TX_ON:
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	/* RX HW timestamp */
-	switch (config.rx_filter) {
-	case HWTSTAMP_FILTER_NONE:
-		/* Reset CQE compression to Admin default */
-		mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
-		break;
-	case HWTSTAMP_FILTER_ALL:
-	case HWTSTAMP_FILTER_SOME:
-	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
-	case HWTSTAMP_FILTER_PTP_V2_EVENT:
-	case HWTSTAMP_FILTER_PTP_V2_SYNC:
-	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
-		/* Disable CQE compression */
-		netdev_warn(dev, "Disabling cqe compression");
-		mlx5e_modify_rx_cqe_compression(priv, false);
-		config.rx_filter = HWTSTAMP_FILTER_ALL;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
-
-	return copy_to_user(ifr->ifr_data, &config,
-			    sizeof(config)) ? -EFAULT : 0;
-}
-
-int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
-
-	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
-		return -EOPNOTSUPP;
-
-	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
-}
-
-static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
-			     const struct timespec64 *ts)
-{
-	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
-						   ptp_info);
-	u64 ns = timespec64_to_ns(ts);
-	unsigned long flags;
-
-	write_lock_irqsave(&tstamp->lock, flags);
-	timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-
-	return 0;
-}
-
-static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
-			     struct timespec64 *ts)
-{
-	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
-						   ptp_info);
-	u64 ns;
-	unsigned long flags;
-
-	write_lock_irqsave(&tstamp->lock, flags);
-	ns = timecounter_read(&tstamp->clock);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-
-	*ts = ns_to_timespec64(ns);
-
-	return 0;
-}
-
-static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
-	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
-						   ptp_info);
-	unsigned long flags;
-
-	write_lock_irqsave(&tstamp->lock, flags);
-	timecounter_adjtime(&tstamp->clock, delta);
-	write_unlock_irqrestore(&tstamp->lock, flags);
-
-	return 0;
-}
-
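-/* Scale the cycle-counter multiplier by |delta| parts per billion:
- * diff = nominal_c_mult * |delta| / 10^9.
- */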
-static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
-{
-	u64 adj;
-	u32 diff;
-	unsigned long flags;
-	int neg_adj = 0;
-	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
-						  ptp_info);
-
-	if (delta < 0) {
-		neg_adj = 1;
-		delta = -delta;
-	}
-
-	adj = tstamp->nominal_c_mult;
-	adj *= delta;
-	diff = div_u64(adj, 1000000000ULL);
-
-	write_lock_irqsave(&tstamp->lock, flags);
-	timecounter_read(&tstamp->clock);
-	tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
-					tstamp->nominal_c_mult + diff;
-	write_unlock_irqrestore(&tstamp->lock, flags);
-
-	return 0;
-}
-
-static const struct ptp_clock_info mlx5e_ptp_clock_info = {
-	.owner		= THIS_MODULE,
-	.max_adj	= 100000000,
-	.n_alarm	= 0,
-	.n_ext_ts	= 0,
-	.n_per_out	= 0,
-	.n_pins		= 0,
-	.pps		= 0,
-	.adjfreq	= mlx5e_ptp_adjfreq,
-	.adjtime	= mlx5e_ptp_adjtime,
-	.gettime64	= mlx5e_ptp_gettime,
-	.settime64	= mlx5e_ptp_settime,
-	.enable		= NULL,
-};
-
-static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
-{
-	tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
-	tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-}
-
-void mlx5e_timestamp_init(struct mlx5e_priv *priv)
-{
-	struct mlx5e_tstamp *tstamp = &priv->tstamp;
-	u64 ns;
-	u64 frac = 0;
-	u32 dev_freq;
-
-	mlx5e_timestamp_init_config(tstamp);
-	dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
-	if (!dev_freq) {
-		mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
-		return;
-	}
-	rwlock_init(&tstamp->lock);
-	tstamp->cycles.read = mlx5e_read_internal_timer;
-	tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
-	tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
-						   tstamp->cycles.shift);
-	tstamp->nominal_c_mult = tstamp->cycles.mult;
-	tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
-	tstamp->mdev = priv->mdev;
-
-	timecounter_init(&tstamp->clock, &tstamp->cycles,
-			 ktime_to_ns(ktime_get_real()));
-
-	/* Calculate the period for the overflow watchdog (in jiffies), to
-	 * make sure the counter is checked at least once every wrap around.
-	 */
-	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
-				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
-	tstamp->overflow_period = ns;
-
-	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
-	if (tstamp->overflow_period)
-		schedule_delayed_work(&tstamp->overflow_work, 0);
-	else
-		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
-
-	/* Configure the PHC */
-	tstamp->ptp_info = mlx5e_ptp_clock_info;
-	snprintf(tstamp->ptp_info.name, sizeof(tstamp->ptp_info.name),
-		 "mlx5 ptp");
-
-	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
-					 &priv->mdev->pdev->dev);
-	if (IS_ERR(tstamp->ptp)) {
-		mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
-			       PTR_ERR(tstamp->ptp));
-		tstamp->ptp = NULL;
-	}
-}
-
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
-{
-	struct mlx5e_tstamp *tstamp = &priv->tstamp;
-
-	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
-		return;
-
-	if (priv->tstamp.ptp) {
-		ptp_clock_unregister(priv->tstamp.ptp);
-		priv->tstamp.ptp = NULL;
-	}
-
-	cancel_delayed_work_sync(&tstamp->overflow_work);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
deleted file mode 100644
index bd898d8..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* mlx5e global resources should be placed in this file.
- * Global resources are common to all the netdevices created on the same NIC.
- */
-
-int mlx5e_create_tir(struct mlx5_core_dev *mdev,
-		     struct mlx5e_tir *tir, u32 *in, int inlen)
-{
-	int err;
-
-	err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
-	if (err)
-		return err;
-
-	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
-
-	return 0;
-}
-
-void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
-		       struct mlx5e_tir *tir)
-{
-	mlx5_core_destroy_tir(mdev, tir->tirn);
-	list_del(&tir->list);
-}
-
-static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
-			     struct mlx5_core_mkey *mkey)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-	void *mkc;
-	u32 *in;
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
-	MLX5_SET(mkc, mkc, lw, 1);
-	MLX5_SET(mkc, mkc, lr, 1);
-
-	MLX5_SET(mkc, mkc, pd, pdn);
-	MLX5_SET(mkc, mkc, length64, 1);
-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-
-	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
-
-	kvfree(in);
-	return err;
-}
-
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
-{
-	struct mlx5e_resources *res = &mdev->mlx5e_res;
-	int err;
-
-	err = mlx5_core_alloc_pd(mdev, &res->pdn);
-	if (err) {
-		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
-	if (err) {
-		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
-		goto err_dealloc_pd;
-	}
-
-	err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
-	if (err) {
-		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
-		goto err_dealloc_transport_domain;
-	}
-
-	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
-
-	return 0;
-
-err_dealloc_transport_domain:
-	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
-err_dealloc_pd:
-	mlx5_core_dealloc_pd(mdev, res->pdn);
-	return err;
-}
-
-void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
-{
-	struct mlx5e_resources *res = &mdev->mlx5e_res;
-
-	mlx5_core_destroy_mkey(mdev, &res->mkey);
-	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
-	mlx5_core_dealloc_pd(mdev, res->pdn);
-}
-
-int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
-				     bool enable_uc_lb)
-{
-	struct mlx5e_tir *tir;
-	void *in;
-	int inlen;
-	int err = 0;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	if (enable_uc_lb)
-		MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
-			 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
-
-	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
-
-	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
-		err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
-		if (err)
-			goto out;
-	}
-
-out:
-	kvfree(in);
-
-	return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
deleted file mode 100644
index 7f6c225..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ /dev/null
@@ -1,751 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include "en.h"
-
-#define MLX5E_MAX_PRIORITY 8
-
-#define MLX5E_100MB (100000)
-#define MLX5E_1GB   (1000000)
-
-#define MLX5E_CEE_STATE_UP    1
-#define MLX5E_CEE_STATE_DOWN  0
-
-/* Set the DCBX admin mode; for non-host (firmware controlled) modes, also
- * set the willing bit so the device may accept peer configuration.
- */
-static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
-				     enum mlx5_dcbx_oper_mode mode)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
-	int err;
-
-	err = mlx5_query_port_dcbx_param(mdev, param);
-	if (err)
-		return err;
-
-	MLX5_SET(dcbx_param, param, version_admin, mode);
-	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
-		MLX5_SET(dcbx_param, param, willing_admin, 1);
-
-	return mlx5_set_port_dcbx_param(mdev, param);
-}
-
-static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
-{
-	struct mlx5e_dcbx *dcbx = &priv->dcbx;
-	int err;
-
-	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
-		return 0;
-
-	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
-		return 0;
-
-	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
-	if (err)
-		return err;
-
-	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
-	return 0;
-}
-
-static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
-				   struct ieee_ets *ets)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err = 0;
-	int i;
-
-	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
-
-	ets->ets_cap = mlx5_max_tc(mdev) + 1;
-	for (i = 0; i < ets->ets_cap; i++) {
-		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
-		if (err)
-			return err;
-	}
-
-	for (i = 0; i < ets->ets_cap; i++) {
-		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
-		if (err)
-			return err;
-		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
-			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
-	}
-
-	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
-
-	return err;
-}
-
-enum {
-	MLX5E_VENDOR_TC_GROUP_NUM = 7,
-	MLX5E_ETS_TC_GROUP_NUM    = 0,
-};
-
-static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
-{
-	bool any_tc_mapped_to_ets = false;
-	int strict_group;
-	int i;
-
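-	/* All ETS TCs share group 0; each strict-priority TC gets its own
-	 * group, starting from 1 when group 0 is taken by ETS.
-	 */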
-	for (i = 0; i <= max_tc; i++)
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
-			any_tc_mapped_to_ets = true;
-
-	strict_group = any_tc_mapped_to_ets ? 1 : 0;
-
-	for (i = 0; i <= max_tc; i++) {
-		switch (ets->tc_tsa[i]) {
-		case IEEE_8021QAZ_TSA_VENDOR:
-			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
-			break;
-		case IEEE_8021QAZ_TSA_STRICT:
-			tc_group[i] = strict_group++;
-			break;
-		case IEEE_8021QAZ_TSA_ETS:
-			tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
-			break;
-		}
-	}
-}
-
-static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
-				 u8 *tc_group, int max_tc)
-{
-	int i;
-
-	for (i = 0; i <= max_tc; i++) {
-		switch (ets->tc_tsa[i]) {
-		case IEEE_8021QAZ_TSA_VENDOR:
-			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
-			break;
-		case IEEE_8021QAZ_TSA_STRICT:
-			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
-			break;
-		case IEEE_8021QAZ_TSA_ETS:
-			tc_tx_bw[i] = ets->tc_tx_bw[i];
-			break;
-		}
-	}
-}
-
-int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
-	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
-	int max_tc = mlx5_max_tc(mdev);
-	int err;
-
-	mlx5e_build_tc_group(ets, tc_group, max_tc);
-	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
-
-	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
-	if (err)
-		return err;
-
-	err = mlx5_set_port_tc_group(mdev, tc_group);
-	if (err)
-		return err;
-
-	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
-	if (err)
-		return err;
-
-	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
-
-	return err;
-}
-
-static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-				    struct ieee_ets *ets)
-{
-	int bw_sum = 0;
-	int i;
-
-	/* Validate Priority */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
-			netdev_err(netdev,
-				   "Failed to validate ETS: priority value greater than max(%d)\n",
-				    MLX5E_MAX_PRIORITY);
-			return -EINVAL;
-		}
-	}
-
-	/* Validate Bandwidth Sum */
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
-			if (!ets->tc_tx_bw[i]) {
-				netdev_err(netdev,
-					   "Failed to validate ETS: BW 0 is illegal\n");
-				return -EINVAL;
-			}
-
-			bw_sum += ets->tc_tx_bw[i];
-		}
-	}
-
-	if (bw_sum != 0 && bw_sum != 100) {
-		netdev_err(netdev,
-			   "Failed to validate ETS: BW sum is illegal\n");
-		return -EINVAL;
-	}
-	return 0;
-}
-
-static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
-				   struct ieee_ets *ets)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	if (!MLX5_CAP_GEN(priv->mdev, ets))
-		return -ENOTSUPP;
-
-	err = mlx5e_dbcnl_validate_ets(netdev, ets);
-	if (err)
-		return err;
-
-	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
-				   struct ieee_pfc *pfc)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
-	int i;
-
-	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
-	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
-		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
-		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
-	}
-
-	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
-}
-
-static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
-				   struct ieee_pfc *pfc)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 curr_pfc_en;
-	int ret;
-
-	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
-
-	if (pfc->pfc_en == curr_pfc_en)
-		return 0;
-
-	ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
-	mlx5_toggle_port_link(mdev);
-
-	return ret;
-}
-
-static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_dcbx *dcbx = &priv->dcbx;
-	u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
-
-	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
-		mode |= DCB_CAP_DCBX_HOST;
-
-	return mode;
-}
-
-static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_dcbx *dcbx = &priv->dcbx;
-
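-	/* mode == 0 requests firmware-controlled (auto) DCBX; anything else
-	 * must describe a host-managed configuration with both IEEE and CEE
-	 * versions enabled.
-	 */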
-	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
-		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
-			return 0;
-
-		/* set dcbx to fw controlled */
-		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
-			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
-			return 0;
-		}
-
-		return 1;
-	}
-
-	if (mlx5e_dcbnl_switch_to_host_mode(priv))
-		return 1;
-
-	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
-	    !(mode & DCB_CAP_DCBX_VER_CEE) ||
-	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
-	    !(mode & DCB_CAP_DCBX_HOST))
-		return 1;
-
-	return 0;
-}
-
-static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
-				       struct ieee_maxrate *maxrate)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
-	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
-	int err;
-	int i;
-
-	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
-	if (err)
-		return err;
-
-	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		switch (max_bw_unit[i]) {
-		case MLX5_100_MBPS_UNIT:
-			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
-			break;
-		case MLX5_GBPS_UNIT:
-			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
-			break;
-		case MLX5_BW_NO_LIMIT:
-			break;
-		default:
-			WARN(true, "unsupported BW unit");
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
-				       struct ieee_maxrate *maxrate)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
-	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
-	__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
-	int i;
-
-	memset(max_bw_value, 0, sizeof(max_bw_value));
-	memset(max_bw_unit, 0, sizeof(max_bw_unit));
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		if (!maxrate->tc_maxrate[i]) {
-			max_bw_unit[i]  = MLX5_BW_NO_LIMIT;
-			continue;
-		}
-		if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
-			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
-						  MLX5E_100MB);
-			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
-			max_bw_unit[i]  = MLX5_100_MBPS_UNIT;
-		} else {
-			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
-						  MLX5E_1GB);
-			max_bw_unit[i]  = MLX5_GBPS_UNIT;
-		}
-	}
-
-	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
-}
-
-static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct ieee_ets ets;
-	struct ieee_pfc pfc;
-	int err = -ENOTSUPP;
-	int i;
-
-	if (!MLX5_CAP_GEN(mdev, ets))
-		goto out;
-
-	memset(&ets, 0, sizeof(ets));
-	memset(&pfc, 0, sizeof(pfc));
-
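-	/* Translate the cached CEE configuration into IEEE ETS form and
-	 * program it through the common IEEE path.
-	 */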
-	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
-	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
-		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
-		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
-		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
-		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
-	}
-
-	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to validate ETS: %d\n", __func__, err);
-		goto out;
-	}
-
-	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to set ETS: %d\n", __func__, err);
-		goto out;
-	}
-
-	/* Set PFC */
-	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
-	if (!cee_cfg->pfc_enable)
-		pfc.pfc_en = 0;
-	else
-		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
-			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
-
-	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to set PFC: %d\n", __func__, err);
-		goto out;
-	}
-out:
-	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
-}
-
-static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
-{
-	return MLX5E_CEE_STATE_UP;
-}
-
-static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
-				      u8 *perm_addr)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (!perm_addr)
-		return;
-
-	mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
-}
-
-static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
-				     int priority, u8 prio_type,
-				     u8 pgid, u8 bw_pct, u8 up_map)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
-
-	if (priority >= CEE_DCBX_MAX_PRIO) {
-		netdev_err(netdev,
-			   "%s, priority is out of range\n", __func__);
-		return;
-	}
-
-	if (pgid >= CEE_DCBX_MAX_PGS) {
-		netdev_err(netdev,
-			   "%s, priority group is out of range\n", __func__);
-		return;
-	}
-
-	cee_cfg->prio_to_pg_map[priority] = pgid;
-}
-
-static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
-				      int pgid, u8 bw_pct)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
-
-	if (pgid >= CEE_DCBX_MAX_PGS) {
-		netdev_err(netdev,
-			   "%s, priority group is out of range\n", __func__);
-		return;
-	}
-
-	cee_cfg->pg_bw_pct[pgid] = bw_pct;
-}
-
-static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
-				     int priority, u8 *prio_type,
-				     u8 *pgid, u8 *bw_pct, u8 *up_map)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	if (priority >= CEE_DCBX_MAX_PRIO) {
-		netdev_err(netdev,
-			   "%s, priority is out of range\n", __func__);
-		return;
-	}
-
-	*prio_type = 0;
-	*bw_pct = 0;
-	*up_map = 0;
-
-	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
-		*pgid = 0;
-}
-
-static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
-				      int pgid, u8 *bw_pct)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	if (pgid >= CEE_DCBX_MAX_PGS) {
-		netdev_err(netdev,
-			   "%s, priority group is out of range\n", __func__);
-		return;
-	}
-
-	if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
-		*bw_pct = 0;
-}
-
-static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
-				  int priority, u8 setting)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
-
-	if (priority >= CEE_DCBX_MAX_PRIO) {
-		netdev_err(netdev,
-			   "%s, priority is out of range\n", __func__);
-		return;
-	}
-
-	if (setting > 1)
-		return;
-
-	cee_cfg->pfc_setting[priority] = setting;
-}
-
-static int
-mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
-			     int priority, u8 *setting)
-{
-	struct ieee_pfc pfc;
-	int err;
-
-	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
-	if (err)
-		*setting = 0;
-	else
-		*setting = (pfc.pfc_en >> priority) & 0x01;
-
-	return err;
-}
-
-static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
-				  int priority, u8 *setting)
-{
-	if (priority >= CEE_DCBX_MAX_PRIO) {
-		netdev_err(netdev,
-			   "%s, priority is out of range\n", __func__);
-		return;
-	}
-
-	if (!setting)
-		return;
-
-	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
-}
-
-static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
-			     int capid, u8 *cap)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 rval = 0;
-
-	switch (capid) {
-	case DCB_CAP_ATTR_PG:
-		*cap = true;
-		break;
-	case DCB_CAP_ATTR_PFC:
-		*cap = true;
-		break;
-	case DCB_CAP_ATTR_UP2TC:
-		*cap = false;
-		break;
-	case DCB_CAP_ATTR_PG_TCS:
-		*cap = 1 << mlx5_max_tc(mdev);
-		break;
-	case DCB_CAP_ATTR_PFC_TCS:
-		*cap = 1 << mlx5_max_tc(mdev);
-		break;
-	case DCB_CAP_ATTR_GSP:
-		*cap = false;
-		break;
-	case DCB_CAP_ATTR_BCN:
-		*cap = false;
-		break;
-	case DCB_CAP_ATTR_DCBX:
-		*cap = (DCB_CAP_DCBX_LLD_MANAGED |
-			DCB_CAP_DCBX_VER_CEE |
-			DCB_CAP_DCBX_STATIC);
-		break;
-	default:
-		*cap = 0;
-		rval = 1;
-		break;
-	}
-
-	return rval;
-}
-
-static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
-				 int tcs_id, u8 *num)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	switch (tcs_id) {
-	case DCB_NUMTCS_ATTR_PG:
-	case DCB_NUMTCS_ATTR_PFC:
-		*num = mlx5_max_tc(mdev) + 1;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
-{
-	struct ieee_pfc pfc;
-
-	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
-		return MLX5E_CEE_STATE_DOWN;
-
-	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
-}
-
-static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
-
-	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
-		return;
-
-	cee_cfg->pfc_enable = state;
-}
-
-const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
-	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
-	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
-	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
-	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
-	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
-	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
-	.getdcbx	= mlx5e_dcbnl_getdcbx,
-	.setdcbx	= mlx5e_dcbnl_setdcbx,
-
-/* CEE interfaces */
-	.setall         = mlx5e_dcbnl_setall,
-	.getstate       = mlx5e_dcbnl_getstate,
-	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,
-
-	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
-	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
-	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
-	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,
-
-	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
-	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
-	.getcap         = mlx5e_dcbnl_getcap,
-	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
-	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
-	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
-};
-
-static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
-					enum mlx5_dcbx_oper_mode *mode)
-{
-	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
-
-	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
-
-	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
-		*mode = MLX5_GET(dcbx_param, out, version_oper);
-
-	/* From the driver's point of view, we only care whether the mode
-	 * is host (HOST) or non-host (AUTO).
-	 */
-	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
-		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
-}
-
-static void mlx5e_ets_init(struct mlx5e_priv *priv)
-{
-	int i;
-	struct ieee_ets ets;
-
-	memset(&ets, 0, sizeof(ets));
-	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
-	for (i = 0; i < ets.ets_cap; i++) {
-		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
-		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
-		ets.prio_tc[i] = i;
-	}
-
-	memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
-
-	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
-	ets.prio_tc[0] = 1;
-	ets.prio_tc[1] = 0;
-
-	mlx5e_dcbnl_ieee_setets_core(priv, &ets);
-}
-
-void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
-{
-	struct mlx5e_dcbx *dcbx = &priv->dcbx;
-
-	if (MLX5_CAP_GEN(priv->mdev, dcbx))
-		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
-
-	mlx5e_ets_init(priv);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
deleted file mode 100644
index 352462a..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ /dev/null
@@ -1,1608 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-static void mlx5e_get_drvinfo(struct net_device *dev,
-			      struct ethtool_drvinfo *drvinfo)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
-	strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
-		sizeof(drvinfo->version));
-	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
-		 "%d.%d.%d",
-		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
-	strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
-		sizeof(drvinfo->bus_info));
-}
-
-struct ptys2ethtool_config {
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
-	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
-	u32 speed;
-};
-
-static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
-
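-/* Fill one PTYS->ethtool translation entry: record the link speed and set
- * every listed ethtool link-mode bit in both the supported and advertised
- * masks.
- */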
-#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)               \
-	({                                                              \
-		struct ptys2ethtool_config *cfg;                        \
-		const unsigned int modes[] = { __VA_ARGS__ };           \
-		unsigned int i;                                         \
-		cfg = &ptys2ethtool_table[reg_];                        \
-		cfg->speed = speed_;                                    \
-		bitmap_zero(cfg->supported,                             \
-			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
-		bitmap_zero(cfg->advertised,                            \
-			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
-		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {             \
-			__set_bit(modes[i], cfg->supported);            \
-			__set_bit(modes[i], cfg->advertised);           \
-		}                                                       \
-	})
-
-void mlx5e_build_ptys2ethtool_map(void)
-{
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
-				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
-				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
-				       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
-				       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
-				       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
-				       ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
-				       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
-				       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
-				       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
-				       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
-				       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
-				       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
-				       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
-				       ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
-				       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
-				       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
-				       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
-				       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
-	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
-				       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
-}
-
-static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 pfc_en_tx;
-	u8 pfc_en_rx;
-	int err;
-
-	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
-
-	return err ? 0 : pfc_en_tx | pfc_en_rx;
-}
-
-static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 rx_pause;
-	u32 tx_pause;
-	int err;
-
-	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
-
-	return err ? false : rx_pause | tx_pause;
-}
-
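-/* Per-ring (RQ/SQ) counters are exposed only while the netdev is open, so
- * their contribution to the stats count drops to zero otherwise.
- */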
-#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) \
-	(NUM_RQ_STATS * priv->params.num_channels * \
-	 test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_SQ_STATS(priv) \
-	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
-	 test_bit(MLX5E_STATE_OPENED, &priv->state))
-#define MLX5E_NUM_PFC_COUNTERS(priv) \
-	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
-	  NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-
-static int mlx5e_get_sset_count(struct net_device *dev, int sset)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	switch (sset) {
-	case ETH_SS_STATS:
-		return NUM_SW_COUNTERS +
-		       MLX5E_NUM_Q_CNTRS(priv) +
-		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-		       NUM_PCIE_COUNTERS +
-		       MLX5E_NUM_RQ_STATS(priv) +
-		       MLX5E_NUM_SQ_STATS(priv) +
-		       MLX5E_NUM_PFC_COUNTERS(priv) +
-		       ARRAY_SIZE(mlx5e_pme_status_desc) +
-		       ARRAY_SIZE(mlx5e_pme_error_desc);
-
-	case ETH_SS_PRIV_FLAGS:
-		return ARRAY_SIZE(mlx5e_priv_flags);
-	case ETH_SS_TEST:
-		return mlx5e_self_test_num(priv);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
-	int i, j, tc, prio, idx = 0;
-	unsigned long pfc_combined;
-
-	/* SW counters */
-	for (i = 0; i < NUM_SW_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
-
-	/* Q counters */
-	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
-
-	/* VPORT counters */
-	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       vport_stats_desc[i].format);
-
-	/* PPORT counters */
-	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_802_3_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2863_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pport_2819_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_perf_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_tas_stats_desc[i].format);
-
-	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN,
-				pport_per_prio_traffic_stats_desc[i].format, prio);
-	}
-
-	pfc_combined = mlx5e_query_pfc_combined(priv);
-	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			char pfc_string[ETH_GSTRING_LEN];
-
-			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
-			sprintf(data + (idx++) * ETH_GSTRING_LEN,
-				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
-		}
-	}
-
-	if (mlx5e_query_global_pause_combined(priv)) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			sprintf(data + (idx++) * ETH_GSTRING_LEN,
-				pport_per_prio_pfc_stats_desc[i].format, "global");
-		}
-	}
-
-	/* port module event counters */
-	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
-
-	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		return;
-
-	/* per channel counters */
-	for (i = 0; i < priv->params.num_channels; i++)
-		for (j = 0; j < NUM_RQ_STATS; j++)
-			sprintf(data + (idx++) * ETH_GSTRING_LEN,
-				rq_stats_desc[j].format, i);
-
-	for (tc = 0; tc < priv->params.num_tc; tc++)
-		for (i = 0; i < priv->params.num_channels; i++)
-			for (j = 0; j < NUM_SQ_STATS; j++)
-				sprintf(data + (idx++) * ETH_GSTRING_LEN,
-					sq_stats_desc[j].format,
-					priv->channeltc_to_txq_map[i][tc]);
-}
-
-static void mlx5e_get_strings(struct net_device *dev,
-			      uint32_t stringset, uint8_t *data)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int i;
-
-	switch (stringset) {
-	case ETH_SS_PRIV_FLAGS:
-		for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
-			strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
-		break;
-
-	case ETH_SS_TEST:
-		for (i = 0; i < mlx5e_self_test_num(priv); i++)
-			strcpy(data + i * ETH_GSTRING_LEN,
-			       mlx5e_self_tests[i]);
-		break;
-
-	case ETH_SS_STATS:
-		mlx5e_fill_stats_strings(priv, data);
-		break;
-	}
-}
-
-static void mlx5e_get_ethtool_stats(struct net_device *dev,
-				    struct ethtool_stats *stats, u64 *data)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_priv *mlx5_priv;
-	int i, j, tc, prio, idx = 0;
-	unsigned long pfc_combined;
-
-	if (!data)
-		return;
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_stats(priv);
-	mutex_unlock(&priv->state_lock);
-
-	for (i = 0; i < NUM_SW_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
-						   sw_stats_desc, i);
-
-	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
-		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
-						   q_stats_desc, i);
-
-	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
-						  vport_stats_desc, i);
-
-	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
-						  pport_802_3_stats_desc, i);
-
-	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
-						  pport_2863_stats_desc, i);
-
-	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
-						  pport_2819_stats_desc, i);
-
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
-						  pcie_perf_stats_desc, i);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
-						  pcie_tas_stats_desc, i);
-
-	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
-			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
-						 pport_per_prio_traffic_stats_desc, i);
-	}
-
-	pfc_combined = mlx5e_query_pfc_combined(priv);
-	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
-							  pport_per_prio_pfc_stats_desc, i);
-		}
-	}
-
-	if (mlx5e_query_global_pause_combined(priv)) {
-		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
-			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
-							  pport_per_prio_pfc_stats_desc, i);
-		}
-	}
-
-	/* port module event counters */
-	mlx5_priv = &priv->mdev->priv;
-	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
-						   mlx5e_pme_status_desc, i);
-
-	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
-		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
-						   mlx5e_pme_error_desc, i);
-
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		return;
-
-	/* per channel counters */
-	for (i = 0; i < priv->params.num_channels; i++)
-		for (j = 0; j < NUM_RQ_STATS; j++)
-			data[idx++] =
-			       MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
-						    rq_stats_desc, j);
-
-	for (tc = 0; tc < priv->params.num_tc; tc++)
-		for (i = 0; i < priv->params.num_channels; i++)
-			for (j = 0; j < NUM_SQ_STATS; j++)
-				data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
-								   sq_stats_desc, j);
-}
-
-static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_wqe)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_wqe;
-
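-	/* For striding RQ each WQE carries num_strides * stride_size bytes;
-	 * estimate packets per WQE assuming ETH_DATA_LEN frames rounded up
-	 * to a whole stride, then return a power-of-two packet count.
-	 */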
-	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-	num_strides = 1 << priv->params.mpwqe_log_num_strides;
-	wqe_size = stride_size * num_strides;
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
-}
-
-static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
-				    int num_packets)
-{
-	int packets_per_wqe;
-	int stride_size;
-	int num_strides;
-	int wqe_size;
-	int num_wqes;
-
-	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		return num_packets;
-
-	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
-	num_strides = 1 << priv->params.mpwqe_log_num_strides;
-	wqe_size = stride_size * num_strides;
-
-	num_packets = (1 << order_base_2(num_packets));
-
-	packets_per_wqe = wqe_size /
-			  ALIGN(ETH_DATA_LEN, stride_size);
-	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
-	return 1 << (order_base_2(num_wqes));
-}
-
-static void mlx5e_get_ringparam(struct net_device *dev,
-				struct ethtool_ringparam *param)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int rq_wq_type = priv->params.rq_wq_type;
-
-	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-							 1 << mlx5_max_log_rq_size(rq_wq_type));
-	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
-	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-						     1 << priv->params.log_rq_size);
-	param->tx_pending     = 1 << priv->params.log_sq_size;
-}
-
-static int mlx5e_set_ringparam(struct net_device *dev,
-			       struct ethtool_ringparam *param)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	bool was_opened;
-	int rq_wq_type = priv->params.rq_wq_type;
-	u32 rx_pending_wqes;
-	u32 min_rq_size;
-	u32 max_rq_size;
-	u16 min_rx_wqes;
-	u8 log_rq_size;
-	u8 log_sq_size;
-	u32 num_mtts;
-	int err = 0;
-
-	if (param->rx_jumbo_pending) {
-		netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-	if (param->rx_mini_pending) {
-		netdev_info(dev, "%s: rx_mini_pending not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-
-	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_min_log_rq_size(rq_wq_type));
-	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
-					       1 << mlx5_max_log_rq_size(rq_wq_type));
-	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
-						   param->rx_pending);
-
-	if (param->rx_pending < min_rq_size) {
-		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
-			    __func__, param->rx_pending,
-			    min_rq_size);
-		return -EINVAL;
-	}
-	if (param->rx_pending > max_rq_size) {
-		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
-			    __func__, param->rx_pending,
-			    max_rq_size);
-		return -EINVAL;
-	}
-
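-	/* Striding RQ backs its buffers with MTT entries; reject ring sizes
-	 * whose resulting MTT count is not valid for the device.
-	 */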
-	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
-	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
-	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
-		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
-			    __func__, param->rx_pending);
-		return -EINVAL;
-	}
-
-	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
-		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
-			    __func__, param->tx_pending,
-			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
-		return -EINVAL;
-	}
-	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
-		netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
-			    __func__, param->tx_pending,
-			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
-		return -EINVAL;
-	}
-
-	log_rq_size = order_base_2(rx_pending_wqes);
-	log_sq_size = order_base_2(param->tx_pending);
-	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
-
-	if (log_rq_size == priv->params.log_rq_size &&
-	    log_sq_size == priv->params.log_sq_size &&
-	    min_rx_wqes == priv->params.min_rx_wqes)
-		return 0;
-
-	mutex_lock(&priv->state_lock);
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(dev);
-
-	priv->params.log_rq_size = log_rq_size;
-	priv->params.log_sq_size = log_sq_size;
-	priv->params.min_rx_wqes = min_rx_wqes;
-
-	if (was_opened)
-		err = mlx5e_open_locked(dev);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static void mlx5e_get_channels(struct net_device *dev,
-			       struct ethtool_channels *ch)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	ch->max_combined   = mlx5e_get_max_num_channels(priv->mdev);
-	ch->combined_count = priv->params.num_channels;
-}
-
-static int mlx5e_set_channels(struct net_device *dev,
-			      struct ethtool_channels *ch)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ncv = mlx5e_get_max_num_channels(priv->mdev);
-	unsigned int count = ch->combined_count;
-	bool arfs_enabled;
-	bool was_opened;
-	int err = 0;
-
-	if (!count) {
-		netdev_info(dev, "%s: combined_count=0 not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-	if (ch->rx_count || ch->tx_count) {
-		netdev_info(dev, "%s: separate rx/tx count not supported\n",
-			    __func__);
-		return -EINVAL;
-	}
-	if (count > ncv) {
-		netdev_info(dev, "%s: count (%d) > max (%d)\n",
-			    __func__, count, ncv);
-		return -EINVAL;
-	}
-
-	if (priv->params.num_channels == count)
-		return 0;
-
-	mutex_lock(&priv->state_lock);
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(dev);
-
-	arfs_enabled = dev->features & NETIF_F_NTUPLE;
-	if (arfs_enabled)
-		mlx5e_arfs_disable(priv);
-
-	priv->params.num_channels = count;
-	mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
-				      MLX5E_INDIR_RQT_SIZE, count);
-
-	if (was_opened)
-		err = mlx5e_open_locked(dev);
-	if (err)
-		goto out;
-
-	if (arfs_enabled) {
-		err = mlx5e_arfs_enable(priv);
-		if (err)
-			netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
-				   __func__, err);
-	}
-
-out:
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int mlx5e_get_coalesce(struct net_device *netdev,
-			      struct ethtool_coalesce *coal)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
-		return -ENOTSUPP;
-
-	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
-	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
-	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation.usec;
-	coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
-	coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
-
-	return 0;
-}
-
-static int mlx5e_set_coalesce(struct net_device *netdev,
-			      struct ethtool_coalesce *coal)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_channel *c;
-	bool restart =
-		!!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
-	bool was_opened;
-	int err = 0;
-	int tc;
-	int i;
-
-	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		return -ENOTSUPP;
-
-	mutex_lock(&priv->state_lock);
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened && restart) {
-		mlx5e_close_locked(netdev);
-		priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
-	}
-
-	priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
-	priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
-	priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
-	priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
-
-	if (!was_opened || restart)
-		goto out;
-
-	for (i = 0; i < priv->params.num_channels; ++i) {
-		c = priv->channel[i];
-
-		for (tc = 0; tc < c->num_tc; tc++) {
-			mlx5_core_modify_cq_moderation(mdev,
-						&c->sq[tc].cq.mcq,
-						coal->tx_coalesce_usecs,
-						coal->tx_max_coalesced_frames);
-		}
-
-		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
-					       coal->rx_coalesce_usecs,
-					       coal->rx_max_coalesced_frames);
-	}
-
-out:
-	if (was_opened && restart)
-		err = mlx5e_open_locked(netdev);
-
-	mutex_unlock(&priv->state_lock);
-	return err;
-}
-
-static void ptys2ethtool_supported_link(unsigned long *supported_modes,
-					u32 eth_proto_cap)
-{
-	unsigned long proto_cap = eth_proto_cap;
-	int proto;
-
-	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
-		bitmap_or(supported_modes, supported_modes,
-			  ptys2ethtool_table[proto].supported,
-			  __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
-				    u32 eth_proto_cap)
-{
-	unsigned long proto_cap = eth_proto_cap;
-	int proto;
-
-	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
-		bitmap_or(advertising_modes, advertising_modes,
-			  ptys2ethtool_table[proto].advertised,
-			  __ETHTOOL_LINK_MODE_MASK_NBITS);
-}
-
-static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
-					u32 eth_proto_cap)
-{
-	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
-			   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
-			   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
-			   | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
-			   | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
-			   | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
-		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
-	}
-
-	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
-			   | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
-			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
-			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
-			   | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
-		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
-	}
-}
-
-int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
-{
-	u32 max_speed = 0;
-	u32 proto_cap;
-	int err;
-	int i;
-
-	err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
-	if (err)
-		return err;
-
-	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
-		if (proto_cap & MLX5E_PROT_MASK(i))
-			max_speed = max(max_speed, ptys2ethtool_table[i].speed);
-
-	*speed = max_speed;
-	return 0;
-}
-
-static void get_speed_duplex(struct net_device *netdev,
-			     u32 eth_proto_oper,
-			     struct ethtool_link_ksettings *link_ksettings)
-{
-	int i;
-	u32 speed = SPEED_UNKNOWN;
-	u8 duplex = DUPLEX_UNKNOWN;
-
-	if (!netif_carrier_ok(netdev))
-		goto out;
-
-	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
-		if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
-			speed = ptys2ethtool_table[i].speed;
-			duplex = DUPLEX_FULL;
-			break;
-		}
-	}
-out:
-	link_ksettings->base.speed = speed;
-	link_ksettings->base.duplex = duplex;
-}
-
-static void get_supported(u32 eth_proto_cap,
-			  struct ethtool_link_ksettings *link_ksettings)
-{
-	unsigned long *supported = link_ksettings->link_modes.supported;
-
-	ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
-	ptys2ethtool_supported_link(supported, eth_proto_cap);
-	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
-	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
-}
-
-static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
-			    u8 rx_pause,
-			    struct ethtool_link_ksettings *link_ksettings)
-{
-	unsigned long *advertising = link_ksettings->link_modes.advertising;
-
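-	/* Per ethtool conventions: advertise Pause when TX pause is enabled,
-	 * and Asym_Pause when the TX and RX pause settings differ.
-	 */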
-	ptys2ethtool_adver_link(advertising, eth_proto_cap);
-	if (tx_pause)
-		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
-	if (tx_pause ^ rx_pause)
-		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
-}
-
-static u8 get_connector_port(u32 eth_proto)
-{
-	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
-			 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
-			 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
-			 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
-		return PORT_FIBRE;
-	}
-
-	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
-			 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
-			 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
-		return PORT_DA;
-	}
-
-	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
-			 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
-			 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
-			 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
-		return PORT_NONE;
-	}
-
-	return PORT_OTHER;
-}
-
-static void get_lp_advertising(u32 eth_proto_lp,
-			       struct ethtool_link_ksettings *link_ksettings)
-{
-	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
-
-	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
-}
-
-static int mlx5e_get_link_ksettings(struct net_device *netdev,
-				    struct ethtool_link_ksettings *link_ksettings)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
-	u32 eth_proto_cap;
-	u32 eth_proto_admin;
-	u32 eth_proto_lp;
-	u32 eth_proto_oper;
-	u8 an_disable_admin;
-	u8 an_status;
-	int err;
-
-	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
-	if (err) {
-		netdev_err(netdev, "%s: query port ptys failed: %d\n",
-			   __func__, err);
-		goto err_query_ptys;
-	}
-
-	eth_proto_cap    = MLX5_GET(ptys_reg, out, eth_proto_capability);
-	eth_proto_admin  = MLX5_GET(ptys_reg, out, eth_proto_admin);
-	eth_proto_oper   = MLX5_GET(ptys_reg, out, eth_proto_oper);
-	eth_proto_lp     = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
-	an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
-	an_status        = MLX5_GET(ptys_reg, out, an_status);
-
-	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
-	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
-
-	get_supported(eth_proto_cap, link_ksettings);
-	get_advertising(eth_proto_admin, 0, 0, link_ksettings);
-	get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
-
-	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
-
-	link_ksettings->base.port = get_connector_port(eth_proto_oper);
-	get_lp_advertising(eth_proto_lp, link_ksettings);
-
-	if (an_status == MLX5_AN_COMPLETE)
-		ethtool_link_ksettings_add_link_mode(link_ksettings,
-						     lp_advertising, Autoneg);
-
-	link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
-							  AUTONEG_ENABLE;
-	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
-					     Autoneg);
-	if (!an_disable_admin)
-		ethtool_link_ksettings_add_link_mode(link_ksettings,
-						     advertising, Autoneg);
-
-err_query_ptys:
-	return err;
-}
-
-static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
-{
-	u32 i, ptys_modes = 0;
-
-	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
-		if (bitmap_intersects(ptys2ethtool_table[i].advertised,
-				      link_modes,
-				      __ETHTOOL_LINK_MODE_MASK_NBITS))
-			ptys_modes |= MLX5E_PROT_MASK(i);
-	}
-
-	return ptys_modes;
-}
-
-static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
-{
-	u32 i, speed_links = 0;
-
-	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
-		if (ptys2ethtool_table[i].speed == speed)
-			speed_links |= MLX5E_PROT_MASK(i);
-	}
-
-	return speed_links;
-}
-
-static int mlx5e_set_link_ksettings(struct net_device *netdev,
-				    const struct ethtool_link_ksettings *link_ksettings)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 eth_proto_cap, eth_proto_admin;
-	bool an_changes = false;
-	u8 an_disable_admin;
-	u8 an_disable_cap;
-	bool an_disable;
-	u32 link_modes;
-	u8 an_status;
-	u32 speed;
-	int err;
-
-	speed = link_ksettings->base.speed;
-
-	link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
-		mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
-		mlx5e_ethtool2ptys_speed_link(speed);
-
-	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
-	if (err) {
-		netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
-			   __func__, err);
-		goto out;
-	}
-
-	link_modes = link_modes & eth_proto_cap;
-	if (!link_modes) {
-		netdev_err(netdev, "%s: unsupported link mode(s) requested\n",
-			   __func__);
-		err = -EINVAL;
-		goto out;
-	}
-
-	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
-	if (err) {
-		netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
-			   __func__, err);
-		goto out;
-	}
-
-	mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
-				&an_disable_cap, &an_disable_admin);
-
-	an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
-	an_changes = ((!an_disable && an_disable_admin) ||
-		      (an_disable && !an_disable_admin));
-
-	if (!an_changes && link_modes == eth_proto_admin)
-		goto out;
-
-	mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
-	mlx5_toggle_port_link(mdev);
-
-out:
-	return err;
-}
-
-static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	return sizeof(priv->params.toeplitz_hash_key);
-}
-
-static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
-{
-	return MLX5E_INDIR_RQT_SIZE;
-}
-
-static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
-			  u8 *hfunc)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (indir)
-		memcpy(indir, priv->params.indirection_rqt,
-		       sizeof(priv->params.indirection_rqt));
-
-	if (key)
-		memcpy(key, priv->params.toeplitz_hash_key,
-		       sizeof(priv->params.toeplitz_hash_key));
-
-	if (hfunc)
-		*hfunc = priv->params.rss_hfunc;
-
-	return 0;
-}
-
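-/* Push the current RSS hash configuration to every indirection TIR. */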
-static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-	int i;
-
-	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
-}
-
-static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
-			  const u8 *key, const u8 hfunc)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
-	void *in;
-
-	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
-	    (hfunc != ETH_RSS_HASH_XOR) &&
-	    (hfunc != ETH_RSS_HASH_TOP))
-		return -EINVAL;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	mutex_lock(&priv->state_lock);
-
-	if (indir) {
-		u32 rqtn = priv->indir_rqt.rqtn;
-
-		memcpy(priv->params.indirection_rqt, indir,
-		       sizeof(priv->params.indirection_rqt));
-		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
-	}
-
-	if (key)
-		memcpy(priv->params.toeplitz_hash_key, key,
-		       sizeof(priv->params.toeplitz_hash_key));
-
-	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
-		priv->params.rss_hfunc = hfunc;
-
-	mlx5e_modify_tirs_hash(priv, in, inlen);
-
-	mutex_unlock(&priv->state_lock);
-
-	kvfree(in);
-
-	return 0;
-}
-
-static int mlx5e_get_rxnfc(struct net_device *netdev,
-			   struct ethtool_rxnfc *info, u32 *rule_locs)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err = 0;
-
-	switch (info->cmd) {
-	case ETHTOOL_GRXRINGS:
-		info->data = priv->params.num_channels;
-		break;
-	case ETHTOOL_GRXCLSRLCNT:
-		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
-		break;
-	case ETHTOOL_GRXCLSRULE:
-		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
-		break;
-	case ETHTOOL_GRXCLSRLALL:
-		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
-		break;
-	default:
-		err = -EOPNOTSUPP;
-		break;
-	}
-
-	return err;
-}
-
-static int mlx5e_get_tunable(struct net_device *dev,
-			     const struct ethtool_tunable *tuna,
-			     void *data)
-{
-	const struct mlx5e_priv *priv = netdev_priv(dev);
-	int err = 0;
-
-	switch (tuna->id) {
-	case ETHTOOL_TX_COPYBREAK:
-		*(u32 *)data = priv->params.tx_max_inline;
-		break;
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int mlx5e_set_tunable(struct net_device *dev,
-			     const struct ethtool_tunable *tuna,
-			     const void *data)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	bool was_opened;
-	u32 val;
-	int err = 0;
-
-	switch (tuna->id) {
-	case ETHTOOL_TX_COPYBREAK:
-		val = *(u32 *)data;
-		if (val > mlx5e_get_max_inline_cap(mdev)) {
-			err = -EINVAL;
-			break;
-		}
-
-		mutex_lock(&priv->state_lock);
-
-		was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-		if (was_opened)
-			mlx5e_close_locked(dev);
-
-		priv->params.tx_max_inline = val;
-
-		if (was_opened)
-			err = mlx5e_open_locked(dev);
-
-		mutex_unlock(&priv->state_lock);
-		break;
-	default:
-		err = -EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static void mlx5e_get_pauseparam(struct net_device *netdev,
-				 struct ethtool_pauseparam *pauseparam)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err;
-
-	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
-				    &pauseparam->tx_pause);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
-			   __func__, err);
-	}
-}
-
-static int mlx5e_set_pauseparam(struct net_device *netdev,
-				struct ethtool_pauseparam *pauseparam)
-{
-	struct mlx5e_priv *priv    = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err;
-
-	if (pauseparam->autoneg)
-		return -EINVAL;
-
-	err = mlx5_set_port_pause(mdev,
-				  pauseparam->rx_pause ? 1 : 0,
-				  pauseparam->tx_pause ? 1 : 0);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
-			   __func__, err);
-	}
-
-	return err;
-}
-
-static int mlx5e_get_ts_info(struct net_device *dev,
-			     struct ethtool_ts_info *info)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int ret;
-
-	ret = ethtool_op_get_ts_info(dev, info);
-	if (ret)
-		return ret;
-
-	info->phc_index = priv->tstamp.ptp ?
-			  ptp_clock_index(priv->tstamp.ptp) : -1;
-
-	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
-		return 0;
-
-	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
-				 SOF_TIMESTAMPING_RX_HARDWARE |
-				 SOF_TIMESTAMPING_RAW_HARDWARE;
-
-	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
-			 BIT(HWTSTAMP_TX_ON);
-
-	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
-			   BIT(HWTSTAMP_FILTER_ALL);
-
-	return 0;
-}
-
-static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
-{
-	__u32 ret = 0;
-
-	if (MLX5_CAP_GEN(mdev, wol_g))
-		ret |= WAKE_MAGIC;
-
-	if (MLX5_CAP_GEN(mdev, wol_s))
-		ret |= WAKE_MAGICSECURE;
-
-	if (MLX5_CAP_GEN(mdev, wol_a))
-		ret |= WAKE_ARP;
-
-	if (MLX5_CAP_GEN(mdev, wol_b))
-		ret |= WAKE_BCAST;
-
-	if (MLX5_CAP_GEN(mdev, wol_m))
-		ret |= WAKE_MCAST;
-
-	if (MLX5_CAP_GEN(mdev, wol_u))
-		ret |= WAKE_UCAST;
-
-	if (MLX5_CAP_GEN(mdev, wol_p))
-		ret |= WAKE_PHY;
-
-	return ret;
-}
-
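-/* Translate firmware WoL mode bits into ethtool WAKE_* flags; the helper
- * below performs the inverse mapping.
- */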
-static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
-{
-	__u32 ret = 0;
-
-	if (mode & MLX5_WOL_MAGIC)
-		ret |= WAKE_MAGIC;
-
-	if (mode & MLX5_WOL_SECURED_MAGIC)
-		ret |= WAKE_MAGICSECURE;
-
-	if (mode & MLX5_WOL_ARP)
-		ret |= WAKE_ARP;
-
-	if (mode & MLX5_WOL_BROADCAST)
-		ret |= WAKE_BCAST;
-
-	if (mode & MLX5_WOL_MULTICAST)
-		ret |= WAKE_MCAST;
-
-	if (mode & MLX5_WOL_UNICAST)
-		ret |= WAKE_UCAST;
-
-	if (mode & MLX5_WOL_PHY_ACTIVITY)
-		ret |= WAKE_PHY;
-
-	return ret;
-}
-
-static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
-{
-	u8 ret = 0;
-
-	if (mode & WAKE_MAGIC)
-		ret |= MLX5_WOL_MAGIC;
-
-	if (mode & WAKE_MAGICSECURE)
-		ret |= MLX5_WOL_SECURED_MAGIC;
-
-	if (mode & WAKE_ARP)
-		ret |= MLX5_WOL_ARP;
-
-	if (mode & WAKE_BCAST)
-		ret |= MLX5_WOL_BROADCAST;
-
-	if (mode & WAKE_MCAST)
-		ret |= MLX5_WOL_MULTICAST;
-
-	if (mode & WAKE_UCAST)
-		ret |= MLX5_WOL_UNICAST;
-
-	if (mode & WAKE_PHY)
-		ret |= MLX5_WOL_PHY_ACTIVITY;
-
-	return ret;
-}
-
-static void mlx5e_get_wol(struct net_device *netdev,
-			  struct ethtool_wolinfo *wol)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 mlx5_wol_mode;
-	int err;
-
-	memset(wol, 0, sizeof(*wol));
-
-	wol->supported = mlx5e_get_wol_supported(mdev);
-	if (!wol->supported)
-		return;
-
-	err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
-	if (err)
-		return;
-
-	wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
-}
-
-static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	__u32 wol_supported = mlx5e_get_wol_supported(mdev);
-	u32 mlx5_wol_mode;
-
-	if (!wol_supported)
-		return -ENOTSUPP;
-
-	if (wol->wolopts & ~wol_supported)
-		return -EINVAL;
-
-	mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
-
-	return mlx5_set_port_wol(mdev, mlx5_wol_mode);
-}
-
-static int mlx5e_set_phys_id(struct net_device *dev,
-			     enum ethtool_phys_id_state state)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 beacon_duration;
-
-	if (!MLX5_CAP_GEN(mdev, beacon_led))
-		return -EOPNOTSUPP;
-
-	switch (state) {
-	case ETHTOOL_ID_ACTIVE:
-		beacon_duration = MLX5_BEACON_DURATION_INF;
-		break;
-	case ETHTOOL_ID_INACTIVE:
-		beacon_duration = MLX5_BEACON_DURATION_OFF;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return mlx5_set_port_beacon(mdev, beacon_duration);
-}
-
-static int mlx5e_get_module_info(struct net_device *netdev,
-				 struct ethtool_modinfo *modinfo)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *dev = priv->mdev;
-	int size_read = 0;
-	u8 data[4];
-
-	size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
-	if (size_read < 2)
-		return -EIO;
-
-	/* data[0] = identifier byte */
-	switch (data[0]) {
-	case MLX5_MODULE_ID_QSFP:
-		modinfo->type       = ETH_MODULE_SFF_8436;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
-		break;
-	case MLX5_MODULE_ID_QSFP_PLUS:
-	case MLX5_MODULE_ID_QSFP28:
-		/* data[1] = revision id */
-		if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
-			modinfo->type       = ETH_MODULE_SFF_8636;
-			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
-		} else {
-			modinfo->type       = ETH_MODULE_SFF_8436;
-			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
-		}
-		break;
-	case MLX5_MODULE_ID_SFP:
-		modinfo->type       = ETH_MODULE_SFF_8472;
-		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
-		break;
-	default:
-		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
-			   __func__, data[0]);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
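-/* Read the module EEPROM in chunks: each query may return fewer bytes
- * than requested, so keep advancing the offset until ee->len bytes have
- * been copied or the device reports no more data.
- */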
-static int mlx5e_get_module_eeprom(struct net_device *netdev,
-				   struct ethtool_eeprom *ee,
-				   u8 *data)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int offset = ee->offset;
-	int size_read;
-	int i = 0;
-
-	if (!ee->len)
-		return -EINVAL;
-
-	memset(data, 0, ee->len);
-
-	while (i < ee->len) {
-		size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
-						     data + i);
-
-		if (!size_read)
-			/* Done reading */
-			return 0;
-
-		if (size_read < 0) {
-			netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
-				   __func__, size_read);
-			return 0;
-		}
-
-		i += size_read;
-		offset += size_read;
-	}
-
-	return 0;
-}
-
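-/* Private-flag plumbing: each flag maps to a handler that reconfigures
- * the channels; mlx5e_handle_pflag() invokes it only when the flag
- * actually flips and records the new state on success.
- */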
-typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-
-static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	bool rx_mode_changed;
-	u8 rx_cq_period_mode;
-	int err = 0;
-	bool reset;
-
-	rx_cq_period_mode = enable ?
-		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-	rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
-
-	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
-	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
-		return -ENOTSUPP;
-
-	if (!rx_mode_changed)
-		return 0;
-
-	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (reset)
-		mlx5e_close_locked(netdev);
-
-	mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
-
-	if (reset)
-		err = mlx5e_open_locked(netdev);
-
-	return err;
-}
-
-static int set_pflag_rx_cqe_compress(struct net_device *netdev,
-				     bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err = 0;
-	bool reset;
-
-	if (!MLX5_CAP_GEN(mdev, cqe_compression))
-		return -ENOTSUPP;
-
-	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
-		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
-		return -EINVAL;
-	}
-
-	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
-	if (reset)
-		mlx5e_close_locked(netdev);
-
-	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
-	priv->params.rx_cqe_compress_def = enable;
-
-	if (reset)
-		err = mlx5e_open_locked(netdev);
-	return err;
-}
-
-static int mlx5e_handle_pflag(struct net_device *netdev,
-			      u32 wanted_flags,
-			      enum mlx5e_priv_flag flag,
-			      mlx5e_pflag_handler pflag_handler)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool enable = !!(wanted_flags & flag);
-	u32 changes = wanted_flags ^ priv->params.pflags;
-	int err;
-
-	if (!(changes & flag))
-		return 0;
-
-	err = pflag_handler(netdev, enable);
-	if (err) {
-		netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
-			   enable ? "Enable" : "Disable", flag, err);
-		return err;
-	}
-
-	MLX5E_SET_PFLAG(priv, flag, enable);
-	return 0;
-}
-
-static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	mutex_lock(&priv->state_lock);
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_CQE_BASED_MODER,
-				 set_pflag_rx_cqe_based_moder);
-	if (err)
-		goto out;
-
-	err = mlx5e_handle_pflag(netdev, pflags,
-				 MLX5E_PFLAG_RX_CQE_COMPRESS,
-				 set_pflag_rx_cqe_compress);
-
-out:
-	mutex_unlock(&priv->state_lock);
-	return err;
-}
-
-static u32 mlx5e_get_priv_flags(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	return priv->params.pflags;
-}
-
-static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
-{
-	int err = 0;
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	switch (cmd->cmd) {
-	case ETHTOOL_SRXCLSRLINS:
-		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
-		break;
-	case ETHTOOL_SRXCLSRLDEL:
-		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
-		break;
-	default:
-		err = -EOPNOTSUPP;
-		break;
-	}
-
-	return err;
-}
-
-const struct ethtool_ops mlx5e_ethtool_ops = {
-	.get_drvinfo       = mlx5e_get_drvinfo,
-	.get_link          = ethtool_op_get_link,
-	.get_strings       = mlx5e_get_strings,
-	.get_sset_count    = mlx5e_get_sset_count,
-	.get_ethtool_stats = mlx5e_get_ethtool_stats,
-	.get_ringparam     = mlx5e_get_ringparam,
-	.set_ringparam     = mlx5e_set_ringparam,
-	.get_channels      = mlx5e_get_channels,
-	.set_channels      = mlx5e_set_channels,
-	.get_coalesce      = mlx5e_get_coalesce,
-	.set_coalesce      = mlx5e_set_coalesce,
-	.get_link_ksettings  = mlx5e_get_link_ksettings,
-	.set_link_ksettings  = mlx5e_set_link_ksettings,
-	.get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
-	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
-	.get_rxfh          = mlx5e_get_rxfh,
-	.set_rxfh          = mlx5e_set_rxfh,
-	.get_rxnfc         = mlx5e_get_rxnfc,
-	.set_rxnfc         = mlx5e_set_rxnfc,
-	.get_tunable       = mlx5e_get_tunable,
-	.set_tunable       = mlx5e_set_tunable,
-	.get_pauseparam    = mlx5e_get_pauseparam,
-	.set_pauseparam    = mlx5e_set_pauseparam,
-	.get_ts_info       = mlx5e_get_ts_info,
-	.set_phys_id       = mlx5e_set_phys_id,
-	.get_wol	   = mlx5e_get_wol,
-	.set_wol	   = mlx5e_set_wol,
-	.get_module_info   = mlx5e_get_module_info,
-	.get_module_eeprom = mlx5e_get_module_eeprom,
-	.get_priv_flags    = mlx5e_get_priv_flags,
-	.set_priv_flags    = mlx5e_set_priv_flags,
-	.self_test         = mlx5e_self_test,
-};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
deleted file mode 100644
index 1fe80de..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ /dev/null
@@ -1,1143 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/list.h>
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <linux/mlx5/fs.h>
-#include "en.h"
-
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
-				  struct mlx5e_l2_rule *ai, int type);
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
-				   struct mlx5e_l2_rule *ai);
-
-enum {
-	MLX5E_FULLMATCH = 0,
-	MLX5E_ALLMULTI  = 1,
-	MLX5E_PROMISC   = 2,
-};
-
-enum {
-	MLX5E_UC        = 0,
-	MLX5E_MC_IPV4   = 1,
-	MLX5E_MC_IPV6   = 2,
-	MLX5E_MC_OTHER  = 3,
-};
-
-enum {
-	MLX5E_ACTION_NONE = 0,
-	MLX5E_ACTION_ADD  = 1,
-	MLX5E_ACTION_DEL  = 2,
-};
-
-struct mlx5e_l2_hash_node {
-	struct hlist_node          hlist;
-	u8                         action;
-	struct mlx5e_l2_rule ai;
-};
-
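-/* Hash an L2 address by its last byte, which serves directly as the
- * bucket index.
- */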
-static inline int mlx5e_hash_l2(u8 *addr)
-{
-	return addr[5];
-}
-
-static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
-{
-	struct mlx5e_l2_hash_node *hn;
-	int ix = mlx5e_hash_l2(addr);
-	int found = 0;
-
-	hlist_for_each_entry(hn, &hash[ix], hlist)
-		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
-			found = 1;
-			break;
-		}
-
-	if (found) {
-		hn->action = MLX5E_ACTION_NONE;
-		return;
-	}
-
-	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
-	if (!hn)
-		return;
-
-	ether_addr_copy(hn->ai.addr, addr);
-	hn->action = MLX5E_ACTION_ADD;
-
-	hlist_add_head(&hn->hlist, &hash[ix]);
-}
-
-static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
-{
-	hlist_del(&hn->hlist);
-	kfree(hn);
-}
-
-static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
-{
-	struct net_device *ndev = priv->netdev;
-	int max_list_size;
-	int list_size;
-	u16 *vlans;
-	int vlan;
-	int err;
-	int i;
-
-	list_size = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
-		list_size++;
-
-	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
-
-	if (list_size > max_list_size) {
-		netdev_warn(ndev,
-			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
-			    list_size, max_list_size);
-		list_size = max_list_size;
-	}
-
-	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
-	if (!vlans)
-		return -ENOMEM;
-
-	i = 0;
-	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
-		if (i >= list_size)
-			break;
-		vlans[i++] = vlan;
-	}
-
-	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
-	if (err)
-		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
-			   err);
-
-	kfree(vlans);
-	return err;
-}
-
-enum mlx5e_vlan_rule_type {
-	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
-	MLX5E_VLAN_RULE_TYPE_ANY_VID,
-	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
-};
-
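-/* Install one VLAN steering rule (untagged, any-VID, or exact VID match)
- * forwarding to the L2 flow table.
- */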
-static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
-				 enum mlx5e_vlan_rule_type rule_type,
-				 u16 vid, struct mlx5_flow_spec *spec)
-{
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_handle **rule_p;
-	int err = 0;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.l2.ft.t;
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
-
-	switch (rule_type) {
-	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		rule_p = &priv->fs.vlan.untagged_rule;
-		break;
-	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		rule_p = &priv->fs.vlan.any_vlan_rule;
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
-		break;
-	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
-		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-				 outer_headers.first_vid);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
-			 vid);
-		break;
-	}
-
-	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
-
-	if (IS_ERR(*rule_p)) {
-		err = PTR_ERR(*rule_p);
-		*rule_p = NULL;
-		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
-	}
-
-	return err;
-}
-
-static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
-			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
-{
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
-		mlx5e_vport_context_update_vlans(priv);
-
-	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
-
-	kvfree(spec);
-
-	return err;
-}
-
-static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
-				enum mlx5e_vlan_rule_type rule_type, u16 vid)
-{
-	switch (rule_type) {
-	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
-		if (priv->fs.vlan.untagged_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
-			priv->fs.vlan.untagged_rule = NULL;
-		}
-		break;
-	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
-		if (priv->fs.vlan.any_vlan_rule) {
-			mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
-			priv->fs.vlan.any_vlan_rule = NULL;
-		}
-		break;
-	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
-		mlx5e_vport_context_update_vlans(priv);
-		if (priv->fs.vlan.active_vlans_rule[vid]) {
-			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
-			priv->fs.vlan.active_vlans_rule[vid] = NULL;
-		}
-		mlx5e_vport_context_update_vlans(priv);
-		break;
-	}
-}
-
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
-{
-	if (!priv->fs.vlan.filter_disabled)
-		return;
-
-	priv->fs.vlan.filter_disabled = false;
-	if (priv->netdev->flags & IFF_PROMISC)
-		return;
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
-{
-	if (priv->fs.vlan.filter_disabled)
-		return;
-
-	priv->fs.vlan.filter_disabled = true;
-	if (priv->netdev->flags & IFF_PROMISC)
-		return;
-	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
-			  u16 vid)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	set_bit(vid, priv->fs.vlan.active_vlans);
-
-	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-}
-
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
-			   u16 vid)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	clear_bit(vid, priv->fs.vlan.active_vlans);
-
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
-
-	return 0;
-}
-
-static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
-{
-	int i;
-
-	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
-	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
-		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
-	}
-
-	if (priv->fs.vlan.filter_disabled &&
-	    !(priv->netdev->flags & IFF_PROMISC))
-		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
-static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
-{
-	int i;
-
-	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
-
-	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
-	}
-
-	if (priv->fs.vlan.filter_disabled &&
-	    !(priv->netdev->flags & IFF_PROMISC))
-		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
-}
-
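-/* Iterate over all nodes in every bucket of an L2 address hash table;
- * safe against removal of the node being visited.
- */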
-#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
-	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
-		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-
-static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
-				    struct mlx5e_l2_hash_node *hn)
-{
-	switch (hn->action) {
-	case MLX5E_ACTION_ADD:
-		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
-		hn->action = MLX5E_ACTION_NONE;
-		break;
-
-	case MLX5E_ACTION_DEL:
-		mlx5e_del_l2_flow_rule(priv, &hn->ai);
-		mlx5e_del_l2_from_hash(hn);
-		break;
-	}
-}
-
-static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
-{
-	struct net_device *netdev = priv->netdev;
-	struct netdev_hw_addr *ha;
-
-	netif_addr_lock_bh(netdev);
-
-	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
-			     priv->netdev->dev_addr);
-
-	netdev_for_each_uc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
-
-	netdev_for_each_mc_addr(ha, netdev)
-		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
-
-	netif_addr_unlock_bh(netdev);
-}
-
-static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
-				  u8 addr_array[][ETH_ALEN], int size)
-{
-	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-	struct net_device *ndev = priv->netdev;
-	struct mlx5e_l2_hash_node *hn;
-	struct hlist_head *addr_list;
-	struct hlist_node *tmp;
-	int i = 0;
-	int hi;
-
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
-
-	if (is_uc) /* Make sure our own address is pushed first */
-		ether_addr_copy(addr_array[i++], ndev->dev_addr);
-	else if (priv->fs.l2.broadcast_enabled)
-		ether_addr_copy(addr_array[i++], ndev->broadcast);
-
-	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
-		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
-			continue;
-		if (i >= size)
-			break;
-		ether_addr_copy(addr_array[i++], hn->ai.addr);
-	}
-}
-
-static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
-						 int list_type)
-{
-	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
-	struct mlx5e_l2_hash_node *hn;
-	u8 (*addr_array)[ETH_ALEN] = NULL;
-	struct hlist_head *addr_list;
-	struct hlist_node *tmp;
-	int max_size;
-	int size;
-	int err;
-	int hi;
-
-	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
-	max_size = is_uc ?
-		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
-		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
-
-	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
-	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
-		size++;
-
-	if (size > max_size) {
-		netdev_warn(priv->netdev,
-			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
-			    is_uc ? "UC" : "MC", size, max_size);
-		size = max_size;
-	}
-
-	if (size) {
-		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
-		if (!addr_array) {
-			err = -ENOMEM;
-			goto out;
-		}
-		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
-	}
-
-	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
-out:
-	if (err)
-		netdev_err(priv->netdev,
-			   "Failed to modify vport %s list err(%d)\n",
-			   is_uc ? "UC" : "MC", err);
-	kfree(addr_array);
-}
-
-static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
-{
-	struct mlx5e_l2_table *ea = &priv->fs.l2;
-
-	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
-	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
-	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
-				      ea->allmulti_enabled,
-				      ea->promisc_enabled);
-}
-
-static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
-{
-	struct mlx5e_l2_hash_node *hn;
-	struct hlist_node *tmp;
-	int i;
-
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
-		mlx5e_execute_l2_action(priv, hn);
-
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
-		mlx5e_execute_l2_action(priv, hn);
-}
-
-static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
-{
-	struct mlx5e_l2_hash_node *hn;
-	struct hlist_node *tmp;
-	int i;
-
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
-		hn->action = MLX5E_ACTION_DEL;
-	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
-		hn->action = MLX5E_ACTION_DEL;
-
-	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
-		mlx5e_sync_netdev_addr(priv);
-
-	mlx5e_apply_netdev_addr(priv);
-}
-
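-/* Reconcile the hardware RX mode with the netdev flags: compute which of
- * promisc/allmulti/broadcast changed state, install new steering rules
- * before the address sync and remove stale ones after it, then push the
- * result to the vport context.
- */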
-void mlx5e_set_rx_mode_work(struct work_struct *work)
-{
-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-					       set_rx_mode_work);
-
-	struct mlx5e_l2_table *ea = &priv->fs.l2;
-	struct net_device *ndev = priv->netdev;
-
-	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
-	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
-	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
-	bool broadcast_enabled = rx_mode_enable;
-
-	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
-	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
-	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
-	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
-	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
-	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
-
-	if (enable_promisc) {
-		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
-		if (!priv->fs.vlan.filter_disabled)
-			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-	}
-	if (enable_allmulti)
-		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
-	if (enable_broadcast)
-		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
-
-	mlx5e_handle_netdev_addr(priv);
-
-	if (disable_broadcast)
-		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
-	if (disable_allmulti)
-		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
-	if (disable_promisc) {
-		if (!priv->fs.vlan.filter_disabled)
-			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
-					    0);
-		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
-	}
-
-	ea->promisc_enabled   = promisc_enabled;
-	ea->allmulti_enabled  = allmulti_enabled;
-	ea->broadcast_enabled = broadcast_enabled;
-
-	mlx5e_vport_context_update(priv);
-}
-
-static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
-{
-	int i;
-
-	for (i = ft->num_groups - 1; i >= 0; i--) {
-		if (!IS_ERR_OR_NULL(ft->g[i]))
-			mlx5_destroy_flow_group(ft->g[i]);
-		ft->g[i] = NULL;
-	}
-	ft->num_groups = 0;
-}
-
-void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
-{
-	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
-}
-
-void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
-	mlx5e_destroy_groups(ft);
-	kfree(ft->g);
-	mlx5_destroy_flow_table(ft->t);
-	ft->t = NULL;
-}
-
-static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
-			mlx5_del_flow_rules(ttc->rules[i]);
-			ttc->rules[i] = NULL;
-		}
-	}
-}
-
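-/* Traffic-type classification map: each entry matches on ethertype and,
- * where non-zero, IP protocol; MLX5E_TT_ANY matches all traffic.
- */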
-static struct {
-	u16 etype;
-	u8 proto;
-} ttc_rules[] = {
-	[MLX5E_TT_IPV4_TCP] = {
-		.etype = ETH_P_IP,
-		.proto = IPPROTO_TCP,
-	},
-	[MLX5E_TT_IPV6_TCP] = {
-		.etype = ETH_P_IPV6,
-		.proto = IPPROTO_TCP,
-	},
-	[MLX5E_TT_IPV4_UDP] = {
-		.etype = ETH_P_IP,
-		.proto = IPPROTO_UDP,
-	},
-	[MLX5E_TT_IPV6_UDP] = {
-		.etype = ETH_P_IPV6,
-		.proto = IPPROTO_UDP,
-	},
-	[MLX5E_TT_IPV4_IPSEC_AH] = {
-		.etype = ETH_P_IP,
-		.proto = IPPROTO_AH,
-	},
-	[MLX5E_TT_IPV6_IPSEC_AH] = {
-		.etype = ETH_P_IPV6,
-		.proto = IPPROTO_AH,
-	},
-	[MLX5E_TT_IPV4_IPSEC_ESP] = {
-		.etype = ETH_P_IP,
-		.proto = IPPROTO_ESP,
-	},
-	[MLX5E_TT_IPV6_IPSEC_ESP] = {
-		.etype = ETH_P_IPV6,
-		.proto = IPPROTO_ESP,
-	},
-	[MLX5E_TT_IPV4] = {
-		.etype = ETH_P_IP,
-		.proto = 0,
-	},
-	[MLX5E_TT_IPV6] = {
-		.etype = ETH_P_IPV6,
-		.proto = 0,
-	},
-	[MLX5E_TT_ANY] = {
-		.etype = 0,
-		.proto = 0,
-	},
-};
-
-static struct mlx5_flow_handle *
-mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
-			struct mlx5_flow_table *ft,
-			struct mlx5_flow_destination *dest,
-			u16 etype,
-			u8 proto)
-{
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5_flow_handle *rule;
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (proto) {
-		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
-	}
-	if (etype) {
-		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
-		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
-	}
-
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
-	}
-
-	kvfree(spec);
-	return err ? ERR_PTR(err) : rule;
-}
-
-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
-{
-	struct mlx5_flow_destination dest;
-	struct mlx5e_ttc_table *ttc;
-	struct mlx5_flow_handle **rules;
-	struct mlx5_flow_table *ft;
-	int tt;
-	int err;
-
-	ttc = &priv->fs.ttc;
-	ft = ttc->ft.t;
-	rules = ttc->rules;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
-		if (tt == MLX5E_TT_ANY)
-			dest.tir_num = priv->direct_tir[0].tirn;
-		else
-			dest.tir_num = priv->indir_tir[tt].tirn;
-		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
-						    ttc_rules[tt].etype,
-						    ttc_rules[tt].proto);
-		if (IS_ERR(rules[tt]))
-			goto del_rules;
-	}
-
-	return 0;
-
-del_rules:
-	err = PTR_ERR(rules[tt]);
-	rules[tt] = NULL;
-	mlx5e_cleanup_ttc_rules(ttc);
-	return err;
-}
-
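-/* TTC flow groups: group 1 matches ethertype + IP protocol (the eight L4
- * rules), group 2 matches ethertype only (IPv4/IPv6), and group 3 matches
- * nothing (the catch-all entry).
- */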
-#define MLX5E_TTC_NUM_GROUPS	3
-#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
-#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
-#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
-#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
-				 MLX5E_TTC_GROUP2_SIZE +\
-				 MLX5E_TTC_GROUP3_SIZE)
-static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5e_flow_table *ft = &ttc->ft;
-	int ix = 0;
-	u32 *in;
-	int err;
-	u8 *mc;
-
-	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
-			sizeof(*ft->g), GFP_KERNEL);
-	if (!ft->g)
-		return -ENOMEM;
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		kfree(ft->g);
-		return -ENOMEM;
-	}
-
-	/* L4 Group */
-	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_TTC_GROUP1_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err;
-	ft->num_groups++;
-
-	/* L3 Group */
-	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_TTC_GROUP2_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err;
-	ft->num_groups++;
-
-	/* Any Group */
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_TTC_GROUP3_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err;
-	ft->num_groups++;
-
-	kvfree(in);
-	return 0;
-
-err:
-	err = PTR_ERR(ft->g[ft->num_groups]);
-	ft->g[ft->num_groups] = NULL;
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
-{
-	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
-	mlx5e_cleanup_ttc_rules(ttc);
-	mlx5e_destroy_flow_table(&ttc->ft);
-}
-
-static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
-{
-	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-	struct mlx5e_flow_table *ft = &ttc->ft;
-	int err;
-
-	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
-				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
-	if (IS_ERR(ft->t)) {
-		err = PTR_ERR(ft->t);
-		ft->t = NULL;
-		return err;
-	}
-
-	err = mlx5e_create_ttc_table_groups(ttc);
-	if (err)
-		goto err;
-
-	err = mlx5e_generate_ttc_table_rules(priv);
-	if (err)
-		goto err;
-
-	return 0;
-err:
-	mlx5e_destroy_flow_table(ft);
-	return err;
-}
-
-static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
-				   struct mlx5e_l2_rule *ai)
-{
-	if (!IS_ERR_OR_NULL(ai->rule)) {
-		mlx5_del_flow_rules(ai->rule);
-		ai->rule = NULL;
-	}
-}
-
-static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
-				  struct mlx5e_l2_rule *ai, int type)
-{
-	struct mlx5_flow_act flow_act = {
-		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
-		.encap_id = 0,
-	};
-	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-	u8 *mc_dmac;
-	u8 *mv_dmac;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
-		return -ENOMEM;
-	}
-
-	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-			       outer_headers.dmac_47_16);
-	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-			       outer_headers.dmac_47_16);
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = priv->fs.ttc.ft.t;
-
-	switch (type) {
-	case MLX5E_FULLMATCH:
-		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		eth_broadcast_addr(mc_dmac);
-		ether_addr_copy(mv_dmac, ai->addr);
-		break;
-
-	case MLX5E_ALLMULTI:
-		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-		mc_dmac[0] = 0x01;
-		mv_dmac[0] = 0x01;
-		break;
-
-	case MLX5E_PROMISC:
-		break;
-	}
-
-	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
-	if (IS_ERR(ai->rule)) {
-		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
-			   __func__, mv_dmac);
-		err = PTR_ERR(ai->rule);
-		ai->rule = NULL;
-	}
-
-	kvfree(spec);
-
-	return err;
-}
-
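-/* L2 table flow groups: a single entry for promiscuous mode (no match),
- * a large group for full DMAC matches, and a single entry for the
- * allmulti multicast-bit match.
- */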
-#define MLX5E_NUM_L2_GROUPS	   3
-#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
-#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
-#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
-#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
-				    MLX5E_L2_GROUP2_SIZE +\
-				    MLX5E_L2_GROUP3_SIZE)
-static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5e_flow_table *ft = &l2_table->ft;
-	int ix = 0;
-	u8 *mc_dmac;
-	u32 *in;
-	int err;
-	u8 *mc;
-
-	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
-	if (!ft->g)
-		return -ENOMEM;
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		kfree(ft->g);
-		return -ENOMEM;
-	}
-
-	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
-			       outer_headers.dmac_47_16);
-	/* Flow Group for promiscuous */
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_L2_GROUP1_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	/* Flow Group for full match */
-	eth_broadcast_addr(mc_dmac);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_L2_GROUP2_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	/* Flow Group for allmulti */
-	eth_zero_addr(mc_dmac);
-	mc_dmac[0] = 0x01;
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_L2_GROUP3_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	kvfree(in);
-	return 0;
-
-err_destroy_groups:
-	err = PTR_ERR(ft->g[ft->num_groups]);
-	ft->g[ft->num_groups] = NULL;
-	mlx5e_destroy_groups(ft);
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
-{
-	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
-}
-
-static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
-{
-	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
-	struct mlx5e_flow_table *ft = &l2_table->ft;
-	int err;
-
-	ft->num_groups = 0;
-	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
-				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
-
-	if (IS_ERR(ft->t)) {
-		err = PTR_ERR(ft->t);
-		ft->t = NULL;
-		return err;
-	}
-
-	err = mlx5e_create_l2_table_groups(l2_table);
-	if (err)
-		goto err_destroy_flow_table;
-
-	return 0;
-
-err_destroy_flow_table:
-	mlx5_destroy_flow_table(ft->t);
-	ft->t = NULL;
-
-	return err;
-}
-
-#define MLX5E_NUM_VLAN_GROUPS	2
-#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
-#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
-#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
-				 MLX5E_VLAN_GROUP1_SIZE)
-
-static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
-					    int inlen)
-{
-	int err;
-	int ix = 0;
-	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_VLAN_GROUP0_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	memset(in, 0, inlen);
-	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
-	MLX5_SET_CFG(in, start_flow_index, ix);
-	ix += MLX5E_VLAN_GROUP1_SIZE;
-	MLX5_SET_CFG(in, end_flow_index, ix - 1);
-	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
-	if (IS_ERR(ft->g[ft->num_groups]))
-		goto err_destroy_groups;
-	ft->num_groups++;
-
-	return 0;
-
-err_destroy_groups:
-	err = PTR_ERR(ft->g[ft->num_groups]);
-	ft->g[ft->num_groups] = NULL;
-	mlx5e_destroy_groups(ft);
-
-	return err;
-}
-
-static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
-{
-	u32 *in;
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
-
-	kvfree(in);
-	return err;
-}
-
-static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
-{
-	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
-	int err;
-
-	ft->num_groups = 0;
-	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
-				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
-
-	if (IS_ERR(ft->t)) {
-		err = PTR_ERR(ft->t);
-		ft->t = NULL;
-		return err;
-	}
-	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
-	if (!ft->g) {
-		err = -ENOMEM;
-		goto err_destroy_vlan_table;
-	}
-
-	err = mlx5e_create_vlan_table_groups(ft);
-	if (err)
-		goto err_free_g;
-
-	mlx5e_add_vlan_rules(priv);
-
-	return 0;
-
-err_free_g:
-	kfree(ft->g);
-err_destroy_vlan_table:
-	mlx5_destroy_flow_table(ft->t);
-	ft->t = NULL;
-
-	return err;
-}
-
-static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
-{
-	mlx5e_del_vlan_rules(priv);
-	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
-}
-
-int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
-{
-	int err;
-
-	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
-					       MLX5_FLOW_NAMESPACE_KERNEL);
-
-	if (!priv->fs.ns)
-		return -EINVAL;
-
-	err = mlx5e_arfs_create_tables(priv);
-	if (err) {
-		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
-			   err);
-		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
-	}
-
-	err = mlx5e_create_ttc_table(priv);
-	if (err) {
-		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
-			   err);
-		goto err_destroy_arfs_tables;
-	}
-
-	err = mlx5e_create_l2_table(priv);
-	if (err) {
-		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
-			   err);
-		goto err_destroy_ttc_table;
-	}
-
-	err = mlx5e_create_vlan_table(priv);
-	if (err) {
-		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
-			   err);
-		goto err_destroy_l2_table;
-	}
-
-	mlx5e_ethtool_init_steering(priv);
-
-	return 0;
-
-err_destroy_l2_table:
-	mlx5e_destroy_l2_table(priv);
-err_destroy_ttc_table:
-	mlx5e_destroy_ttc_table(priv);
-err_destroy_arfs_tables:
-	mlx5e_arfs_destroy_tables(priv);
-
-	return err;
-}
-
-void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
-{
-	mlx5e_destroy_vlan_table(priv);
-	mlx5e_destroy_l2_table(priv);
-	mlx5e_destroy_ttc_table(priv);
-	mlx5e_arfs_destroy_tables(priv);
-	mlx5e_ethtool_cleanup_steering(priv);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
deleted file mode 100644
index 3691451..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ /dev/null
@@ -1,587 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/fs.h>
-#include "en.h"
-
-struct mlx5e_ethtool_rule {
-	struct list_head             list;
-	struct ethtool_rx_flow_spec  flow_spec;
-	struct mlx5_flow_handle	     *rule;
-	struct mlx5e_ethtool_table   *eth_ft;
-};
-
-static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
-{
-	if (!--eth_ft->num_rules) {
-		mlx5_destroy_flow_table(eth_ft->ft);
-		eth_ft->ft = NULL;
-	}
-}
-
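-/* Ethtool steering priorities: tables are bucketed by tuple count, so
- * rules with more match fields land in lower-numbered (higher-priority)
- * tables and are evaluated first.
- */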
-#define MLX5E_ETHTOOL_L3_L4_PRIO 0
-#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
-#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
-#define MLX5E_ETHTOOL_NUM_GROUPS  10
-static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
-						  struct ethtool_rx_flow_spec *fs,
-						  int num_tuples)
-{
-	struct mlx5e_ethtool_table *eth_ft;
-	struct mlx5_flow_namespace *ns;
-	struct mlx5_flow_table *ft;
-	int max_tuples;
-	int table_size;
-	int prio;
-
-	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
-	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
-		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
-		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
-		break;
-	case IP_USER_FLOW:
-		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
-		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
-		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
-		break;
-	case ETHER_FLOW:
-		max_tuples = ETHTOOL_NUM_L2_FTS;
-		prio = max_tuples - num_tuples;
-		eth_ft = &priv->fs.ethtool.l2_ft[prio];
-		prio += MLX5E_ETHTOOL_L2_PRIO;
-		break;
-	default:
-		return ERR_PTR(-EINVAL);
-	}
-
-	eth_ft->num_rules++;
-	if (eth_ft->ft)
-		return eth_ft;
-
-	ns = mlx5_get_flow_namespace(priv->mdev,
-				     MLX5_FLOW_NAMESPACE_ETHTOOL);
-	if (!ns)
-		return ERR_PTR(-ENOTSUPP);
-
-	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
-						       flow_table_properties_nic_receive.log_max_ft_size)),
-			   MLX5E_ETHTOOL_NUM_ENTRIES);
-	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
-						 table_size,
-						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
-	if (IS_ERR(ft))
-		return ERR_CAST(ft);
-
-	eth_ft->ft = ft;
-	return eth_ft;
-}
-
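-/* Pre-mask a flow value in place so that bits outside the user-supplied
- * mask never reach the hardware match value.
- */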
-static void mask_spec(u8 *mask, u8 *val, size_t size)
-{
-	unsigned int i;
-
-	for (i = 0; i < size; i++, mask++, val++)
-		*val &= *mask;
-}
-
-static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
-		    __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
-{
-	if (ip4src_m) {
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       &ip4src_v, sizeof(ip4src_v));
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       0xff, sizeof(ip4src_m));
-	}
-	if (ip4dst_m) {
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       &ip4dst_v, sizeof(ip4dst_v));
-		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       0xff, sizeof(ip4dst_m));
-	}
-	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-		 ethertype, ETH_P_IP);
-	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-		 ethertype, 0xffff);
-}
-
-static int set_flow_attrs(u32 *match_c, u32 *match_v,
-			  struct ethtool_rx_flow_spec *fs)
-{
-	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
-					     outer_headers);
-	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
-					     outer_headers);
-	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
-	struct ethtool_tcpip4_spec *l4_mask;
-	struct ethtool_tcpip4_spec *l4_val;
-	struct ethtool_usrip4_spec *l3_mask;
-	struct ethtool_usrip4_spec *l3_val;
-	struct ethhdr *eth_val;
-	struct ethhdr *eth_mask;
-
-	switch (flow_type) {
-	case TCP_V4_FLOW:
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		l4_val = &fs->h_u.tcp_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
-			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
-		if (l4_mask->psrc) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
-				 ntohs(l4_val->psrc));
-		}
-		if (l4_mask->pdst) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
-				 ntohs(l4_val->pdst));
-		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
-			 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
-			 IPPROTO_TCP);
-		break;
-	case UDP_V4_FLOW:
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		l4_val = &fs->h_u.tcp_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
-			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
-
-		if (l4_mask->psrc) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
-				 ntohs(l4_val->psrc));
-		}
-		if (l4_mask->pdst) {
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
-				 0xffff);
-			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
-				 ntohs(l4_val->pdst));
-		}
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
-			 0xffff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
-			 IPPROTO_UDP);
-		break;
-	case IP_USER_FLOW:
-		l3_mask = &fs->m_u.usr_ip4_spec;
-		l3_val = &fs->h_u.usr_ip4_spec;
-		set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
-			l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
-		break;
-	case ETHER_FLOW:
-		eth_mask = &fs->m_u.ether_spec;
-		eth_val = &fs->h_u.ether_spec;
-
-		mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, smac_47_16),
-				eth_mask->h_source);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, smac_47_16),
-				eth_val->h_source);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, dmac_47_16),
-				eth_mask->h_dest);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, dmac_47_16),
-				eth_val->h_dest);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
-			 ntohs(eth_mask->h_proto));
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
-			 ntohs(eth_val->h_proto));
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	if ((fs->flow_type & FLOW_EXT) &&
-	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-			 vlan_tag, 1);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-			 vlan_tag, 1);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
-			 first_vid, 0xfff);
-		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
-			 first_vid, ntohs(fs->h_ext.vlan_tci));
-	}
-	if (fs->flow_type & FLOW_MAC_EXT &&
-	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_c, dmac_47_16),
-				fs->m_ext.h_dest);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
-					     outer_headers_v, dmac_47_16),
-				fs->h_ext.h_dest);
-	}
-
-	return 0;
-}
-
-static void add_rule_to_list(struct mlx5e_priv *priv,
-			     struct mlx5e_ethtool_rule *rule)
-{
-	struct mlx5e_ethtool_rule *iter;
-	struct list_head *head = &priv->fs.ethtool.rules;
-
-	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
-		if (iter->flow_spec.location > rule->flow_spec.location)
-			break;
-		head = &iter->list;
-	}
-	priv->fs.ethtool.tot_num_rules++;
-	list_add(&rule->list, head);
-}
-
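-/* Check that the outer_headers match criteria are all-zero by comparing
- * the buffer against itself shifted by one byte.
- */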
-static bool outer_header_zero(u32 *match_criteria)
-{
-	int size = MLX5_ST_SZ_BYTES(fte_match_param);
-	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
-					     outer_headers);
-
-	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
-						  outer_headers_c + 1,
-						  size - 1);
-}
-
-static struct mlx5_flow_handle *
-add_ethtool_flow_rule(struct mlx5e_priv *priv,
-		      struct mlx5_flow_table *ft,
-		      struct ethtool_rx_flow_spec *fs)
-{
-	struct mlx5_flow_destination *dst = NULL;
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	struct mlx5_flow_handle *rule;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec)
-		return ERR_PTR(-ENOMEM);
-	err = set_flow_attrs(spec->match_criteria, spec->match_value,
-			     fs);
-	if (err)
-		goto free;
-
-	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
-		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-	} else {
-		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
-		if (!dst) {
-			err = -ENOMEM;
-			goto free;
-		}
-
-		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-		dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
-		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	}
-
-	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
-	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
-			   __func__, err);
-		goto free;
-	}
-free:
-	kvfree(spec);
-	kfree(dst);
-	return err ? ERR_PTR(err) : rule;
-}
-
-static void del_ethtool_rule(struct mlx5e_priv *priv,
-			     struct mlx5e_ethtool_rule *eth_rule)
-{
-	if (eth_rule->rule)
-		mlx5_del_flow_rules(eth_rule->rule);
-	list_del(&eth_rule->list);
-	priv->fs.ethtool.tot_num_rules--;
-	put_flow_table(eth_rule->eth_ft);
-	kfree(eth_rule);
-}
-
-static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
-						    int location)
-{
-	struct mlx5e_ethtool_rule *iter;
-
-	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
-		if (iter->flow_spec.location == location)
-			return iter;
-	}
-	return NULL;
-}
-
-static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
-						   int location)
-{
-	struct mlx5e_ethtool_rule *eth_rule;
-
-	eth_rule = find_ethtool_rule(priv, location);
-	if (eth_rule)
-		del_ethtool_rule(priv, eth_rule);
-
-	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
-	if (!eth_rule)
-		return ERR_PTR(-ENOMEM);
-
-	add_rule_to_list(priv, eth_rule);
-	return eth_rule;
-}
-
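-/* ethtool flow steering supports up to BIT(10) == 1024 rule locations */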
-#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
-
-#define all_ones(field) (field == (__force typeof(field))-1)
-#define all_zeros_or_all_ones(field)		\
-	((field) == 0 || (field) == (__force typeof(field))-1)
-
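-/* Returns the number of match tuples in the flow spec, or a negative errno
- * when the spec cannot be offloaded (unsupported flow type, partial mask,
- * out-of-range location or ring).
- */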
-static int validate_flow(struct mlx5e_priv *priv,
-			 struct ethtool_rx_flow_spec *fs)
-{
-	struct ethtool_tcpip4_spec *l4_mask;
-	struct ethtool_usrip4_spec *l3_mask;
-	struct ethhdr *eth_mask;
-	int num_tuples = 0;
-
-	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
-		return -EINVAL;
-
-	if (fs->ring_cookie >= priv->params.num_channels &&
-	    fs->ring_cookie != RX_CLS_FLOW_DISC)
-		return -EINVAL;
-
-	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
-	case ETHER_FLOW:
-		eth_mask = &fs->m_u.ether_spec;
-		if (!is_zero_ether_addr(eth_mask->h_dest))
-			num_tuples++;
-		if (!is_zero_ether_addr(eth_mask->h_source))
-			num_tuples++;
-		if (eth_mask->h_proto)
-			num_tuples++;
-		break;
-	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
-		if (fs->m_u.tcp_ip4_spec.tos)
-			return -EINVAL;
-		l4_mask = &fs->m_u.tcp_ip4_spec;
-		if (l4_mask->ip4src) {
-			if (!all_ones(l4_mask->ip4src))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->ip4dst) {
-			if (!all_ones(l4_mask->ip4dst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->psrc) {
-			if (!all_ones(l4_mask->psrc))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l4_mask->pdst) {
-			if (!all_ones(l4_mask->pdst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		/* Flow is TCP/UDP */
-		num_tuples++;
-		break;
-	case IP_USER_FLOW:
-		l3_mask = &fs->m_u.usr_ip4_spec;
-		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
-		    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
-			return -EINVAL;
-		if (l3_mask->ip4src) {
-			if (!all_ones(l3_mask->ip4src))
-				return -EINVAL;
-			num_tuples++;
-		}
-		if (l3_mask->ip4dst) {
-			if (!all_ones(l3_mask->ip4dst))
-				return -EINVAL;
-			num_tuples++;
-		}
-		/* Flow is IPv4 */
-		num_tuples++;
-		break;
-	default:
-		return -EINVAL;
-	}
-	if ((fs->flow_type & FLOW_EXT)) {
-		if (fs->m_ext.vlan_etype ||
-		    (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
-			return -EINVAL;
-
-		if (fs->m_ext.vlan_tci) {
-			if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
-				return -EINVAL;
-		}
-		num_tuples++;
-	}
-
-	if (fs->flow_type & FLOW_MAC_EXT &&
-	    !is_zero_ether_addr(fs->m_ext.h_dest))
-		num_tuples++;
-
-	return num_tuples;
-}
-
-int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
-			       struct ethtool_rx_flow_spec *fs)
-{
-	struct mlx5e_ethtool_table *eth_ft;
-	struct mlx5e_ethtool_rule *eth_rule;
-	struct mlx5_flow_handle *rule;
-	int num_tuples;
-	int err;
-
-	num_tuples = validate_flow(priv, fs);
-	if (num_tuples <= 0) {
-		netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
-		return -EINVAL;
-	}
-
-	eth_ft = get_flow_table(priv, fs, num_tuples);
-	if (IS_ERR(eth_ft))
-		return PTR_ERR(eth_ft);
-
-	eth_rule = get_ethtool_rule(priv, fs->location);
-	if (IS_ERR(eth_rule)) {
-		put_flow_table(eth_ft);
-		return PTR_ERR(eth_rule);
-	}
-
-	eth_rule->flow_spec = *fs;
-	eth_rule->eth_ft = eth_ft;
-	if (!eth_ft->ft) {
-		err = -EINVAL;
-		goto del_ethtool_rule;
-	}
-	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
-	if (IS_ERR(rule)) {
-		err = PTR_ERR(rule);
-		goto del_ethtool_rule;
-	}
-
-	eth_rule->rule = rule;
-
-	return 0;
-
-del_ethtool_rule:
-	del_ethtool_rule(priv, eth_rule);
-
-	return err;
-}
-
-int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
-			      int location)
-{
-	struct mlx5e_ethtool_rule *eth_rule;
-	int err = 0;
-
-	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
-		return -ENOSPC;
-
-	eth_rule = find_ethtool_rule(priv, location);
-	if (!eth_rule) {
-		err = -ENOENT;
-		goto out;
-	}
-
-	del_ethtool_rule(priv, eth_rule);
-out:
-	return err;
-}
-
-int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
-			   int location)
-{
-	struct mlx5e_ethtool_rule *eth_rule;
-
-	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
-		return -EINVAL;
-
-	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
-		if (eth_rule->flow_spec.location == location) {
-			info->fs = eth_rule->flow_spec;
-			return 0;
-		}
-	}
-
-	return -ENOENT;
-}
-
-int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
-				u32 *rule_locs)
-{
-	int location = 0;
-	int idx = 0;
-	int err = 0;
-
-	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
-		err = mlx5e_ethtool_get_flow(priv, info, location);
-		if (!err)
-			rule_locs[idx++] = location;
-		location++;
-	}
-	return err;
-}
-
-void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
-{
-	struct mlx5e_ethtool_rule *iter;
-	struct mlx5e_ethtool_rule *temp;
-
-	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
-		del_ethtool_rule(priv, iter);
-}
-
-void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
-{
-	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
deleted file mode 100644
index c32754b..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ /dev/null
@@ -1,4125 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <net/tc_act/tc_gact.h>
-#include <net/pkt_cls.h>
-#include <linux/mlx5/fs.h>
-#include <net/vxlan.h>
-#include <linux/bpf.h>
-#include "en.h"
-#include "en_tc.h"
-#include "eswitch.h"
-#include "vxlan.h"
-
-struct mlx5e_rq_param {
-	u32			rqc[MLX5_ST_SZ_DW(rqc)];
-	struct mlx5_wq_param	wq;
-	bool			am_enabled;
-};
-
-struct mlx5e_sq_param {
-	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
-	struct mlx5_wq_param       wq;
-	u16                        max_inline;
-	u8                         min_inline_mode;
-	enum mlx5e_sq_type         type;
-};
-
-struct mlx5e_cq_param {
-	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
-	struct mlx5_wq_param       wq;
-	u16                        eq_ix;
-	u8                         cq_period_mode;
-};
-
-struct mlx5e_channel_param {
-	struct mlx5e_rq_param      rq;
-	struct mlx5e_sq_param      sq;
-	struct mlx5e_sq_param      xdp_sq;
-	struct mlx5e_sq_param      icosq;
-	struct mlx5e_cq_param      rx_cq;
-	struct mlx5e_cq_param      tx_cq;
-	struct mlx5e_cq_param      icosq_cq;
-};
-
-static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
-{
-	return MLX5_CAP_GEN(mdev, striding_rq) &&
-		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
-		MLX5_CAP_ETH(mdev, reg_umr_sq);
-}
-
-static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
-{
-	priv->params.rq_wq_type = rq_type;
-	switch (priv->params.rq_wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
-		priv->params.mpwqe_log_stride_sz =
-			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
-			MLX5_MPWRQ_LOG_STRIDE_SIZE;
-		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
-			priv->params.mpwqe_log_stride_sz;
-		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
-	}
-	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
-					       BIT(priv->params.log_rq_size));
-
-	mlx5_core_info(priv->mdev,
-		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
-		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
-		       BIT(priv->params.log_rq_size),
-		       BIT(priv->params.mpwqe_log_stride_sz),
-		       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
-}
-
-static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
-{
-	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
-		    !priv->xdp_prog ?
-		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
-		    MLX5_WQ_TYPE_LINKED_LIST;
-	mlx5e_set_rq_type_params(priv, rq_type);
-}
-
-static void mlx5e_update_carrier(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u8 port_state;
-
-	port_state = mlx5_query_vport_state(mdev,
-		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
-
-	if (port_state == VPORT_STATE_UP) {
-		netdev_info(priv->netdev, "Link up\n");
-		netif_carrier_on(priv->netdev);
-	} else {
-		netdev_info(priv->netdev, "Link down\n");
-		netif_carrier_off(priv->netdev);
-	}
-}
-
-static void mlx5e_update_carrier_work(struct work_struct *work)
-{
-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-					       update_carrier_work);
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_update_carrier(priv);
-	mutex_unlock(&priv->state_lock);
-}
-
-static void mlx5e_tx_timeout_work(struct work_struct *work)
-{
-	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
-					       tx_timeout_work);
-	int err;
-
-	rtnl_lock();
-	mutex_lock(&priv->state_lock);
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		goto unlock;
-	mlx5e_close_locked(priv->netdev);
-	err = mlx5e_open_locked(priv->netdev);
-	if (err)
-		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
-			   err);
-unlock:
-	mutex_unlock(&priv->state_lock);
-	rtnl_unlock();
-}
-
-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	u64 tx_offload_none = 0;
-	int i, j;
-
-	memset(s, 0, sizeof(*s));
-	for (i = 0; i < priv->params.num_channels; i++) {
-		rq_stats = &priv->channel[i]->rq.stats;
-
-		s->rx_packets	+= rq_stats->packets;
-		s->rx_bytes	+= rq_stats->bytes;
-		s->rx_lro_packets += rq_stats->lro_packets;
-		s->rx_lro_bytes	+= rq_stats->lro_bytes;
-		s->rx_csum_none	+= rq_stats->csum_none;
-		s->rx_csum_complete += rq_stats->csum_complete;
-		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
-		s->rx_xdp_drop += rq_stats->xdp_drop;
-		s->rx_xdp_tx += rq_stats->xdp_tx;
-		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
-		s->rx_wqe_err   += rq_stats->wqe_err;
-		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
-		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
-		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
-		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
-		s->rx_cache_reuse += rq_stats->cache_reuse;
-		s->rx_cache_full  += rq_stats->cache_full;
-		s->rx_cache_empty += rq_stats->cache_empty;
-		s->rx_cache_busy  += rq_stats->cache_busy;
-
-		for (j = 0; j < priv->params.num_tc; j++) {
-			sq_stats = &priv->channel[i]->sq[j].stats;
-
-			s->tx_packets		+= sq_stats->packets;
-			s->tx_bytes		+= sq_stats->bytes;
-			s->tx_tso_packets	+= sq_stats->tso_packets;
-			s->tx_tso_bytes		+= sq_stats->tso_bytes;
-			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
-			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
-			s->tx_queue_stopped	+= sq_stats->stopped;
-			s->tx_queue_wake	+= sq_stats->wake;
-			s->tx_queue_dropped	+= sq_stats->dropped;
-			s->tx_xmit_more		+= sq_stats->xmit_more;
-			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-			tx_offload_none		+= sq_stats->csum_none;
-		}
-	}
-
-	/* Update calculated offload counters */
-	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
-	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
-	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
-				priv->stats.pport.phy_counters,
-				counter_set.phys_layer_cntrs.link_down_events);
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	MLX5_SET(query_vport_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
-	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
-	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
-	memset(out, 0, outlen);
-	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-	int prio;
-	void *out;
-	u32 *in;
-
-	in = mlx5_vzalloc(sz);
-	if (!in)
-		goto free_out;
-
-	MLX5_SET(ppcnt_reg, in, local_port, 1);
-
-	out = pstats->IEEE_802_3_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	out = pstats->RFC_2863_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	out = pstats->RFC_2819_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	out = pstats->phy_counters;
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
-	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
-		out = pstats->per_prio_counters[prio];
-		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
-		mlx5_core_access_reg(mdev, in, sz, out, sz,
-				     MLX5_REG_PPCNT, 0, 0);
-	}
-
-free_out:
-	kvfree(in);
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
-	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
-
-	if (!priv->q_counter)
-		return;
-
-	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
-				      &qcnt->rx_out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-	void *out;
-	u32 *in;
-
-	in = mlx5_vzalloc(sz);
-	if (!in)
-		return;
-
-	out = pcie_stats->pcie_perf_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	out = pcie_stats->pcie_tas_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	kvfree(in);
-}
-
-void mlx5e_update_stats(struct mlx5e_priv *priv)
-{
-	mlx5e_update_q_counter(priv);
-	mlx5e_update_vport_counters(priv);
-	mlx5e_update_pport_counters(priv);
-	mlx5e_update_sw_counters(priv);
-	mlx5e_update_pcie_counters(priv);
-}
-
-void mlx5e_update_stats_work(struct work_struct *work)
-{
-	struct delayed_work *dwork = to_delayed_work(work);
-	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
-					       update_stats_work);
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		priv->profile->update_stats(priv);
-		queue_delayed_work(priv->wq, dwork,
-				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
-	}
-	mutex_unlock(&priv->state_lock);
-}
-
-static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
-			      enum mlx5_dev_event event, unsigned long param)
-{
-	struct mlx5e_priv *priv = vpriv;
-
-	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
-		return;
-
-	switch (event) {
-	case MLX5_DEV_EVENT_PORT_UP:
-	case MLX5_DEV_EVENT_PORT_DOWN:
-		queue_work(priv->wq, &priv->update_carrier_work);
-		break;
-
-	default:
-		break;
-	}
-}
-
-static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
-{
-	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
-}
-
-static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
-{
-	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
-	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
-}
-
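-/* HW MTU accounts for the Ethernet header, one VLAN tag and the FCS */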
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
-static inline int mlx5e_get_wqe_mtt_sz(void)
-{
-	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
-	 * To avoid copying garbage after the mtt array, we allocate
-	 * a little more.
-	 */
-	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
-		     MLX5_UMR_MTT_ALIGNMENT);
-}
-
-static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
-				       struct mlx5e_umr_wqe *wqe, u16 ix)
-{
-	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
-	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
-	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
-	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
-
-	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
-				      ds_cnt);
-	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
-	cseg->imm       = rq->mkey_be;
-
-	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
-	ucseg->xlt_octowords =
-		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
-	ucseg->bsf_octowords =
-		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
-	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-
-	dseg->lkey = sq->mkey_be;
-	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
-}
-
-static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
-				     struct mlx5e_channel *c)
-{
-	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
-	int i;
-
-	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
-				      GFP_KERNEL, cpu_to_node(c->cpu));
-	if (!rq->mpwqe.info)
-		goto err_out;
-
-	/* We allocate more than mtt_sz as we will align the pointer */
-	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
-					cpu_to_node(c->cpu));
-	if (unlikely(!rq->mpwqe.mtt_no_align))
-		goto err_free_wqe_info;
-
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
-					MLX5_UMR_ALIGN);
-		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
-						  PCI_DMA_TODEVICE);
-		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
-			goto err_unmap_mtts;
-
-		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
-	}
-
-	return 0;
-
-err_unmap_mtts:
-	while (--i >= 0) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
-				 PCI_DMA_TODEVICE);
-	}
-	kfree(rq->mpwqe.mtt_no_align);
-err_free_wqe_info:
-	kfree(rq->mpwqe.info);
-
-err_out:
-	return -ENOMEM;
-}
-
-static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
-{
-	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-	int mtt_sz = mlx5e_get_wqe_mtt_sz();
-	int i;
-
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
-
-		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
-				 PCI_DMA_TODEVICE);
-	}
-	kfree(rq->mpwqe.mtt_no_align);
-	kfree(rq->mpwqe.info);
-}
-
-static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
-				 u64 npages, u8 page_shift,
-				 struct mlx5_core_mkey *umr_mkey)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-	void *mkc;
-	u32 *in;
-	int err;
-
-	if (!MLX5E_VALID_NUM_MTTS(npages))
-		return -EINVAL;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
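-	/* the mkey starts out "free"; RX UMR WQEs fill in its MTT
-	 * translations at run time
-	 */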
-	MLX5_SET(mkc, mkc, free, 1);
-	MLX5_SET(mkc, mkc, umr_en, 1);
-	MLX5_SET(mkc, mkc, lw, 1);
-	MLX5_SET(mkc, mkc, lr, 1);
-	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
-
-	MLX5_SET(mkc, mkc, qpn, 0xffffff);
-	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
-	MLX5_SET64(mkc, mkc, len, npages << page_shift);
-	MLX5_SET(mkc, mkc, translations_octword_size,
-		 MLX5_MTT_OCTW(npages));
-	MLX5_SET(mkc, mkc, log_page_size, page_shift);
-
-	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
-
-	kvfree(in);
-	return err;
-}
-
-static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
-{
-	struct mlx5e_priv *priv = rq->priv;
-	u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
-
-	return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
-}
-
-static int mlx5e_create_rq(struct mlx5e_channel *c,
-			   struct mlx5e_rq_param *param,
-			   struct mlx5e_rq *rq)
-{
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	void *rqc = param->rqc;
-	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
-	u32 byte_count;
-	u32 frag_sz;
-	int npages;
-	int wq_sz;
-	int err;
-	int i;
-
-	param->wq.db_numa_node = cpu_to_node(c->cpu);
-
-	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
-				&rq->wq_ctrl);
-	if (err)
-		return err;
-
-	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
-
-	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
-
-	rq->wq_type = priv->params.rq_wq_type;
-	rq->pdev    = c->pdev;
-	rq->netdev  = c->netdev;
-	rq->tstamp  = &priv->tstamp;
-	rq->channel = c;
-	rq->ix      = c->ix;
-	rq->priv    = c->priv;
-
-	rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
-	if (IS_ERR(rq->xdp_prog)) {
-		err = PTR_ERR(rq->xdp_prog);
-		rq->xdp_prog = NULL;
-		goto err_rq_wq_destroy;
-	}
-
-	rq->buff.map_dir = DMA_FROM_DEVICE;
-	if (rq->xdp_prog)
-		rq->buff.map_dir = DMA_BIDIRECTIONAL;
-
-	switch (priv->params.rq_wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		if (mlx5e_is_vf_vport_rep(priv)) {
-			err = -EINVAL;
-			goto err_rq_wq_destroy;
-		}
-
-		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
-		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
-		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
-
-		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
-		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
-
-		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
-		byte_count = rq->buff.wqe_sz;
-
-		err = mlx5e_create_rq_umr_mkey(rq);
-		if (err)
-			goto err_rq_wq_destroy;
-		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
-
-		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
-		if (err)
-			goto err_destroy_umr_mkey;
-		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
-					    GFP_KERNEL, cpu_to_node(c->cpu));
-		if (!rq->dma_info) {
-			err = -ENOMEM;
-			goto err_rq_wq_destroy;
-		}
-
-		if (mlx5e_is_vf_vport_rep(priv))
-			rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
-		else
-			rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
-
-		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
-		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
-
-		rq->buff.wqe_sz = (priv->params.lro_en) ?
-				priv->params.lro_wqe_sz :
-				MLX5E_SW2HW_MTU(priv->netdev->mtu);
-		byte_count = rq->buff.wqe_sz;
-
-		/* calc the required page order */
-		frag_sz = MLX5_RX_HEADROOM +
-			  byte_count /* packet data */ +
-			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-		frag_sz = SKB_DATA_ALIGN(frag_sz);
-
-		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
-		rq->buff.page_order = order_base_2(npages);
-
-		byte_count |= MLX5_HW_START_PADDING;
-		rq->mkey_be = c->mkey_be;
-	}
-
-	for (i = 0; i < wq_sz; i++) {
-		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
-
-		wqe->data.byte_count = cpu_to_be32(byte_count);
-		wqe->data.lkey = rq->mkey_be;
-	}
-
-	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
-	rq->am.mode = priv->params.rx_cq_period_mode;
-
-	rq->page_cache.head = 0;
-	rq->page_cache.tail = 0;
-
-	return 0;
-
-err_destroy_umr_mkey:
-	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
-
-err_rq_wq_destroy:
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
-	mlx5_wq_destroy(&rq->wq_ctrl);
-
-	return err;
-}
-
-static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
-{
-	int i;
-
-	if (rq->xdp_prog)
-		bpf_prog_put(rq->xdp_prog);
-
-	switch (rq->wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		mlx5e_rq_free_mpwqe_info(rq);
-		mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
-		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		kfree(rq->dma_info);
-	}
-
-	for (i = rq->page_cache.head; i != rq->page_cache.tail;
-	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
-		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
-
-		mlx5e_page_release(rq, dma_info, false);
-	}
-	mlx5_wq_destroy(&rq->wq_ctrl);
-}
-
-static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
-{
-	struct mlx5e_priv *priv = rq->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *rqc;
-	void *wq;
-	int inlen;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
-		sizeof(u64) * rq->wq_ctrl.buf.npages;
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
-	wq  = MLX5_ADDR_OF(rqc, rqc, wq);
-
-	memcpy(rqc, param->rqc, sizeof(param->rqc));
-
-	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
-	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
-	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
-	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
-						MLX5_ADAPTER_PAGE_SHIFT);
-	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
-
-	mlx5_fill_page_array(&rq->wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
-
-	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
-
-	kvfree(in);
-
-	return err;
-}
-
-static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
-				 int next_state)
-{
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *rqc;
-	int inlen;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
-	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
-	MLX5_SET(rqc, rqc, state, next_state);
-
-	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
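-/* Toggle VLAN stripping on an RDY RQ; "vsd" is VLAN Stripping Disable */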
-static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
-{
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *rqc;
-	int inlen;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
-	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
-	MLX5_SET64(modify_rq_in, in, modify_bitmask,
-		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
-	MLX5_SET(rqc, rqc, vsd, vsd);
-	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
-	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_disable_rq(struct mlx5e_rq *rq)
-{
-	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
-}
-
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
-{
-	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
-	struct mlx5e_channel *c = rq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_wq_ll *wq = &rq->wq;
-
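-	/* poll every 20ms, for up to 20 seconds, until the RQ holds at
-	 * least min_rx_wqes posted receive WQEs
-	 */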
-	while (time_before(jiffies, exp_time)) {
-		if (wq->cur_sz >= priv->params.min_rx_wqes)
-			return 0;
-
-		msleep(20);
-	}
-
-	return -ETIMEDOUT;
-}
-
-static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_ix_be;
-	u16 wqe_ix;
-
-	/* UMR WQE (if in progress) is always at wq->head */
-	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
-		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
-
-	while (!mlx5_wq_ll_is_empty(wq)) {
-		wqe_ix_be = *wq->tail_next;
-		wqe_ix    = be16_to_cpu(wqe_ix_be);
-		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
-		rq->dealloc_wqe(rq, wqe_ix);
-		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
-			       &wqe->next.next_wqe_index);
-	}
-}
-
-static int mlx5e_open_rq(struct mlx5e_channel *c,
-			 struct mlx5e_rq_param *param,
-			 struct mlx5e_rq *rq)
-{
-	struct mlx5e_sq *sq = &c->icosq;
-	u16 pi = sq->pc & sq->wq.sz_m1;
-	int err;
-
-	err = mlx5e_create_rq(c, param, rq);
-	if (err)
-		return err;
-
-	err = mlx5e_enable_rq(rq, param);
-	if (err)
-		goto err_destroy_rq;
-
-	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
-	if (err)
-		goto err_disable_rq;
-
-	if (param->am_enabled)
-		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
-
-	sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
-	sq->db.ico_wqe[pi].num_wqebbs = 1;
-	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
-
-	return 0;
-
-err_disable_rq:
-	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	mlx5e_disable_rq(rq);
-err_destroy_rq:
-	mlx5e_destroy_rq(rq);
-
-	return err;
-}
-
-static void mlx5e_close_rq(struct mlx5e_rq *rq)
-{
-	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
-	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-	cancel_work_sync(&rq->am.work);
-
-	mlx5e_disable_rq(rq);
-	mlx5e_free_rx_descs(rq);
-	mlx5e_destroy_rq(rq);
-}
-
-static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
-{
-	kfree(sq->db.xdp.di);
-	kfree(sq->db.xdp.wqe_info);
-}
-
-static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
-{
-	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
-	sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
-				     GFP_KERNEL, numa);
-	sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
-					   GFP_KERNEL, numa);
-	if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
-		mlx5e_free_sq_xdp_db(sq);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
-{
-	kfree(sq->db.ico_wqe);
-}
-
-static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
-{
-	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-
-	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
-				      GFP_KERNEL, numa);
-	if (!sq->db.ico_wqe)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
-{
-	kfree(sq->db.txq.wqe_info);
-	kfree(sq->db.txq.dma_fifo);
-	kfree(sq->db.txq.skb);
-}
-
-static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
-{
-	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
-	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
-
-	sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
-				      GFP_KERNEL, numa);
-	sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
-					   GFP_KERNEL, numa);
-	sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
-					   GFP_KERNEL, numa);
-	if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
-		mlx5e_free_sq_txq_db(sq);
-		return -ENOMEM;
-	}
-
-	sq->dma_fifo_mask = df_sz - 1;
-
-	return 0;
-}
-
-static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
-{
-	switch (sq->type) {
-	case MLX5E_SQ_TXQ:
-		mlx5e_free_sq_txq_db(sq);
-		break;
-	case MLX5E_SQ_ICO:
-		mlx5e_free_sq_ico_db(sq);
-		break;
-	case MLX5E_SQ_XDP:
-		mlx5e_free_sq_xdp_db(sq);
-		break;
-	}
-}
-
-static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
-{
-	switch (sq->type) {
-	case MLX5E_SQ_TXQ:
-		return mlx5e_alloc_sq_txq_db(sq, numa);
-	case MLX5E_SQ_ICO:
-		return mlx5e_alloc_sq_ico_db(sq, numa);
-	case MLX5E_SQ_XDP:
-		return mlx5e_alloc_sq_xdp_db(sq, numa);
-	}
-
-	return 0;
-}
-
-static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
-{
-	switch (sq_type) {
-	case MLX5E_SQ_ICO:
-		return MLX5E_ICOSQ_MAX_WQEBBS;
-	case MLX5E_SQ_XDP:
-		return MLX5E_XDP_TX_WQEBBS;
-	}
-	return MLX5_SEND_WQE_MAX_WQEBBS;
-}
-
-static int mlx5e_create_sq(struct mlx5e_channel *c,
-			   int tc,
-			   struct mlx5e_sq_param *param,
-			   struct mlx5e_sq *sq)
-{
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *sqc = param->sqc;
-	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
-	int err;
-
-	sq->type      = param->type;
-	sq->pdev      = c->pdev;
-	sq->tstamp    = &priv->tstamp;
-	sq->mkey_be   = c->mkey_be;
-	sq->channel   = c;
-	sq->tc        = tc;
-
-	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
-	if (err)
-		return err;
-
-	param->wq.db_numa_node = cpu_to_node(c->cpu);
-
-	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
-				 &sq->wq_ctrl);
-	if (err)
-		goto err_unmap_free_uar;
-
-	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-	if (sq->bfreg.wc)
-		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-
-	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-	sq->max_inline  = param->max_inline;
-	sq->min_inline_mode =
-		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
-		param->min_inline_mode : 0;
-
-	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
-	if (err)
-		goto err_sq_wq_destroy;
-
-	if (sq->type == MLX5E_SQ_TXQ) {
-		int txq_ix;
-
-		txq_ix = c->ix + tc * priv->params.num_channels;
-		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
-		priv->txq_to_sq_map[txq_ix] = sq;
-	}
-
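-	/* edge is the highest pc offset at which the largest WQE for this
-	 * SQ type still fits without wrapping the work queue
-	 */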
-	sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
-	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
-
-	return 0;
-
-err_sq_wq_destroy:
-	mlx5_wq_destroy(&sq->wq_ctrl);
-
-err_unmap_free_uar:
-	mlx5_free_bfreg(mdev, &sq->bfreg);
-
-	return err;
-}
-
-static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
-{
-	struct mlx5e_channel *c = sq->channel;
-	struct mlx5e_priv *priv = c->priv;
-
-	mlx5e_free_sq_db(sq);
-	mlx5_wq_destroy(&sq->wq_ctrl);
-	mlx5_free_bfreg(priv->mdev, &sq->bfreg);
-}
-
-static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
-{
-	struct mlx5e_channel *c = sq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *sqc;
-	void *wq;
-	int inlen;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
-		sizeof(u64) * sq->wq_ctrl.buf.npages;
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
-	wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-	memcpy(sqc, param->sqc, sizeof(param->sqc));
-
-	MLX5_SET(sqc,  sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
-				       0 : priv->tisn[sq->tc]);
-	MLX5_SET(sqc,  sqc, cqn,		sq->cq.mcq.cqn);
-	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
-	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
-	MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
-
-	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
-	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
-	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
-					  MLX5_ADAPTER_PAGE_SHIFT);
-	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
-
-	mlx5_fill_page_array(&sq->wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
-
-	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
-
-	kvfree(in);
-
-	return err;
-}
-
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
-			   int next_state, bool update_rl, int rl_index)
-{
-	struct mlx5e_channel *c = sq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *sqc;
-	int inlen;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
-
-	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
-	MLX5_SET(sqc, sqc, state, next_state);
-	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
-		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
-		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, rl_index);
-	}
-
-	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_disable_sq(struct mlx5e_sq *sq)
-{
-	struct mlx5e_channel *c = sq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	mlx5_core_destroy_sq(mdev, sq->sqn);
-	if (sq->rate_limit)
-		mlx5_rl_remove_rate(mdev, sq->rate_limit);
-}
-
-static int mlx5e_open_sq(struct mlx5e_channel *c,
-			 int tc,
-			 struct mlx5e_sq_param *param,
-			 struct mlx5e_sq *sq)
-{
-	int err;
-
-	err = mlx5e_create_sq(c, tc, param, sq);
-	if (err)
-		return err;
-
-	err = mlx5e_enable_sq(sq, param);
-	if (err)
-		goto err_destroy_sq;
-
-	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
-			      false, 0);
-	if (err)
-		goto err_disable_sq;
-
-	if (sq->txq) {
-		netdev_tx_reset_queue(sq->txq);
-		netif_tx_start_queue(sq->txq);
-	}
-
-	return 0;
-
-err_disable_sq:
-	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	mlx5e_disable_sq(sq);
-err_destroy_sq:
-	mlx5e_destroy_sq(sq);
-
-	return err;
-}
-
-static inline void netif_tx_disable_queue(struct netdev_queue *txq)
-{
-	__netif_tx_lock_bh(txq);
-	netif_tx_stop_queue(txq);
-	__netif_tx_unlock_bh(txq);
-}
-
-static void mlx5e_close_sq(struct mlx5e_sq *sq)
-{
-	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-	/* prevent netif_tx_wake_queue */
-	napi_synchronize(&sq->channel->napi);
-
-	if (sq->txq) {
-		netif_tx_disable_queue(sq->txq);
-
-		/* last doorbell out, godspeed .. */
-		if (mlx5e_sq_has_room_for(sq, 1)) {
-			sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
-			mlx5e_send_nop(sq, true);
-		}
-	}
-
-	mlx5e_disable_sq(sq);
-	mlx5e_free_sq_descs(sq);
-	mlx5e_destroy_sq(sq);
-}
-
-static int mlx5e_create_cq(struct mlx5e_channel *c,
-			   struct mlx5e_cq_param *param,
-			   struct mlx5e_cq *cq)
-{
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_core_cq *mcq = &cq->mcq;
-	int eqn_not_used;
-	unsigned int irqn;
-	int err;
-	u32 i;
-
-	param->wq.buf_numa_node = cpu_to_node(c->cpu);
-	param->wq.db_numa_node  = cpu_to_node(c->cpu);
-	param->eq_ix   = c->ix;
-
-	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
-			       &cq->wq_ctrl);
-	if (err)
-		return err;
-
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
-	cq->napi        = &c->napi;
-
-	mcq->cqe_sz     = 64;
-	mcq->set_ci_db  = cq->wq_ctrl.db.db;
-	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
-	*mcq->set_ci_db = 0;
-	*mcq->arm_db    = 0;
-	mcq->vector     = param->eq_ix;
-	mcq->comp       = mlx5e_completion_event;
-	mcq->event      = mlx5e_cq_error_event;
-	mcq->irqn       = irqn;
-
-	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
-		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
-
-		cqe->op_own = 0xf1;
-	}
-
-	cq->channel = c;
-	cq->priv = priv;
-
-	return 0;
-}
-
-static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
-{
-	mlx5_cqwq_destroy(&cq->wq_ctrl);
-}
-
-static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
-{
-	struct mlx5e_priv *priv = cq->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_core_cq *mcq = &cq->mcq;
-
-	void *in;
-	void *cqc;
-	int inlen;
-	unsigned int irqn_not_used;
-	int eqn;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
-
-	memcpy(cqc, param->cqc, sizeof(param->cqc));
-
-	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
-				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
-
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
-
-	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
-	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
-	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
-	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
-					    MLX5_ADAPTER_PAGE_SHIFT);
-	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
-
-	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
-
-	kvfree(in);
-
-	if (err)
-		return err;
-
-	mlx5e_cq_arm(cq);
-
-	return 0;
-}
-
-static void mlx5e_disable_cq(struct mlx5e_cq *cq)
-{
-	struct mlx5e_priv *priv = cq->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	mlx5_core_destroy_cq(mdev, &cq->mcq);
-}
-
-static int mlx5e_open_cq(struct mlx5e_channel *c,
-			 struct mlx5e_cq_param *param,
-			 struct mlx5e_cq *cq,
-			 struct mlx5e_cq_moder moderation)
-{
-	int err;
-	struct mlx5e_priv *priv = c->priv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	err = mlx5e_create_cq(c, param, cq);
-	if (err)
-		return err;
-
-	err = mlx5e_enable_cq(cq, param);
-	if (err)
-		goto err_destroy_cq;
-
-	if (MLX5_CAP_GEN(mdev, cq_moderation))
-		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
-					       moderation.usec,
-					       moderation.pkts);
-	return 0;
-
-err_destroy_cq:
-	mlx5e_destroy_cq(cq);
-
-	return err;
-}
-
-static void mlx5e_close_cq(struct mlx5e_cq *cq)
-{
-	mlx5e_disable_cq(cq);
-	mlx5e_destroy_cq(cq);
-}
-
-static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
-{
-	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
-}
-
-static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
-			     struct mlx5e_channel_param *cparam)
-{
-	struct mlx5e_priv *priv = c->priv;
-	int err;
-	int tc;
-
-	for (tc = 0; tc < c->num_tc; tc++) {
-		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
-				    priv->params.tx_cq_moderation);
-		if (err)
-			goto err_close_tx_cqs;
-	}
-
-	return 0;
-
-err_close_tx_cqs:
-	for (tc--; tc >= 0; tc--)
-		mlx5e_close_cq(&c->sq[tc].cq);
-
-	return err;
-}
-
-static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
-{
-	int tc;
-
-	for (tc = 0; tc < c->num_tc; tc++)
-		mlx5e_close_cq(&c->sq[tc].cq);
-}
-
-static int mlx5e_open_sqs(struct mlx5e_channel *c,
-			  struct mlx5e_channel_param *cparam)
-{
-	int err;
-	int tc;
-
-	for (tc = 0; tc < c->num_tc; tc++) {
-		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
-		if (err)
-			goto err_close_sqs;
-	}
-
-	return 0;
-
-err_close_sqs:
-	for (tc--; tc >= 0; tc--)
-		mlx5e_close_sq(&c->sq[tc]);
-
-	return err;
-}
-
-static void mlx5e_close_sqs(struct mlx5e_channel *c)
-{
-	int tc;
-
-	for (tc = 0; tc < c->num_tc; tc++)
-		mlx5e_close_sq(&c->sq[tc]);
-}
-
-static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
-{
-	int i;
-
-	for (i = 0; i < priv->profile->max_tc; i++)
-		priv->channeltc_to_txq_map[ix][i] =
-			ix + i * priv->params.num_channels;
-}
-
-static int mlx5e_set_sq_maxrate(struct net_device *dev,
-				struct mlx5e_sq *sq, u32 rate)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 rl_index = 0;
-	int err;
-
-	if (rate == sq->rate_limit)
-		/* nothing to do */
-		return 0;
-
-	if (sq->rate_limit)
-		/* remove the current rl index to free space for the next ones */
-		mlx5_rl_remove_rate(mdev, sq->rate_limit);
-
-	sq->rate_limit = 0;
-
-	if (rate) {
-		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
-		if (err) {
-			netdev_err(dev, "Failed configuring rate %u: %d\n",
-				   rate, err);
-			return err;
-		}
-	}
-
-	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
-			      MLX5_SQC_STATE_RDY, true, rl_index);
-	if (err) {
-		netdev_err(dev, "Failed configuring rate %u: %d\n",
-			   rate, err);
-		/* remove the rate from the table */
-		if (rate)
-			mlx5_rl_remove_rate(mdev, rate);
-		return err;
-	}
-
-	sq->rate_limit = rate;
-	return 0;
-}
-
-static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
-	int err = 0;
-
-	if (!mlx5_rl_is_supported(mdev)) {
-		netdev_err(dev, "Rate limiting is not supported on this device\n");
-		return -EINVAL;
-	}
-
-	/* rate is given in Mb/sec, HW config is in Kb/sec */
-	rate = rate << 10;
-
-	/* Check whether the rate is in a valid range; 0 is always valid */
-	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
-		netdev_err(dev, "TX rate %u is not in range\n", rate);
-		return -ERANGE;
-	}
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		err = mlx5e_set_sq_maxrate(dev, sq, rate);
-	if (!err)
-		priv->tx_rates[index] = rate;
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
-			      struct mlx5e_channel_param *cparam,
-			      struct mlx5e_channel **cp)
-{
-	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
-	struct net_device *netdev = priv->netdev;
-	struct mlx5e_cq_moder rx_cq_profile;
-	int cpu = mlx5e_get_cpu(priv, ix);
-	struct mlx5e_channel *c;
-	struct mlx5e_sq *sq;
-	int err;
-	int i;
-
-	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
-	if (!c)
-		return -ENOMEM;
-
-	c->priv     = priv;
-	c->ix       = ix;
-	c->cpu      = cpu;
-	c->pdev     = &priv->mdev->pdev->dev;
-	c->netdev   = priv->netdev;
-	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
-	c->num_tc   = priv->params.num_tc;
-	c->xdp      = !!priv->xdp_prog;
-
-	if (priv->params.rx_am_enabled)
-		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
-	else
-		rx_cq_profile = priv->params.rx_cq_moderation;
-
-	mlx5e_build_channeltc_to_txq_map(priv, ix);
-
-	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
-
-	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
-	if (err)
-		goto err_napi_del;
-
-	err = mlx5e_open_tx_cqs(c, cparam);
-	if (err)
-		goto err_close_icosq_cq;
-
-	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
-			    rx_cq_profile);
-	if (err)
-		goto err_close_tx_cqs;
-
-	/* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
-	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
-				     priv->params.tx_cq_moderation) : 0;
-	if (err)
-		goto err_close_rx_cq;
-
-	napi_enable(&c->napi);
-
-	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
-	if (err)
-		goto err_disable_napi;
-
-	err = mlx5e_open_sqs(c, cparam);
-	if (err)
-		goto err_close_icosq;
-
-	for (i = 0; i < priv->params.num_tc; i++) {
-		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
-
-		if (priv->tx_rates[txq_ix]) {
-			sq = priv->txq_to_sq_map[txq_ix];
-			mlx5e_set_sq_maxrate(priv->netdev, sq,
-					     priv->tx_rates[txq_ix]);
-		}
-	}
-
-	err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
-	if (err)
-		goto err_close_sqs;
-
-	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
-	if (err)
-		goto err_close_xdp_sq;
-
-	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
-	*cp = c;
-
-	return 0;
-err_close_xdp_sq:
-	if (c->xdp)
-		mlx5e_close_sq(&c->xdp_sq);
-
-err_close_sqs:
-	mlx5e_close_sqs(c);
-
-err_close_icosq:
-	mlx5e_close_sq(&c->icosq);
-
-err_disable_napi:
-	napi_disable(&c->napi);
-	if (c->xdp)
-		mlx5e_close_cq(&c->xdp_sq.cq);
-
-err_close_rx_cq:
-	mlx5e_close_cq(&c->rq.cq);
-
-err_close_tx_cqs:
-	mlx5e_close_tx_cqs(c);
-
-err_close_icosq_cq:
-	mlx5e_close_cq(&c->icosq.cq);
-
-err_napi_del:
-	netif_napi_del(&c->napi);
-	kfree(c);
-
-	return err;
-}
-
-static void mlx5e_close_channel(struct mlx5e_channel *c)
-{
-	mlx5e_close_rq(&c->rq);
-	if (c->xdp)
-		mlx5e_close_sq(&c->xdp_sq);
-	mlx5e_close_sqs(c);
-	mlx5e_close_sq(&c->icosq);
-	napi_disable(&c->napi);
-	if (c->xdp)
-		mlx5e_close_cq(&c->xdp_sq.cq);
-	mlx5e_close_cq(&c->rq.cq);
-	mlx5e_close_tx_cqs(c);
-	mlx5e_close_cq(&c->icosq.cq);
-	netif_napi_del(&c->napi);
-
-	kfree(c);
-}
-
-static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
-				 struct mlx5e_rq_param *param)
-{
-	void *rqc = param->rqc;
-	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
-	switch (priv->params.rq_wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
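-		/* the wq fields are offset-encoded: log_wqe_num_of_strides is
-		 * taken relative to 2^9 and log_wqe_stride_size to 2^6
-		 */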
-		MLX5_SET(wq, wq, log_wqe_num_of_strides,
-			 priv->params.mpwqe_log_num_strides - 9);
-		MLX5_SET(wq, wq, log_wqe_stride_size,
-			 priv->params.mpwqe_log_stride_sz - 6);
-		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
-		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
-	}
-
-	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
-	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
-	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
-	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
-	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
-
-	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
-	param->wq.linear = 1;
-
-	param->am_enabled = priv->params.rx_am_enabled;
-}
-
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
-{
-	void *rqc = param->rqc;
-	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
-
-	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
-	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
-}
-
-static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
-					struct mlx5e_sq_param *param)
-{
-	void *sqc = param->sqc;
-	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
-	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
-
-	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
-}
-
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
-				 struct mlx5e_sq_param *param)
-{
-	void *sqc = param->sqc;
-	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-	mlx5e_build_sq_param_common(priv, param);
-	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
-
-	param->max_inline = priv->params.tx_max_inline;
-	param->min_inline_mode = priv->params.tx_min_inline_mode;
-	param->type = MLX5E_SQ_TXQ;
-}
-
-static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
-					struct mlx5e_cq_param *param)
-{
-	void *cqc = param->cqc;
-
-	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
-}
-
-static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
-				    struct mlx5e_cq_param *param)
-{
-	void *cqc = param->cqc;
-	u8 log_cq_size;
-
-	switch (priv->params.rq_wq_type) {
-	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-		log_cq_size = priv->params.log_rq_size +
-			priv->params.mpwqe_log_num_strides;
-		break;
-	default: /* MLX5_WQ_TYPE_LINKED_LIST */
-		log_cq_size = priv->params.log_rq_size;
-	}
-
-	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
-	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
-		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
-		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
-	}
-
-	mlx5e_build_common_cq_param(priv, param);
-
-	param->cq_period_mode = priv->params.rx_cq_period_mode;
-}
-
-static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
-				    struct mlx5e_cq_param *param)
-{
-	void *cqc = param->cqc;
-
-	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
-
-	mlx5e_build_common_cq_param(priv, param);
-
-	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
-				     struct mlx5e_cq_param *param,
-				     u8 log_wq_size)
-{
-	void *cqc = param->cqc;
-
-	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
-
-	mlx5e_build_common_cq_param(priv, param);
-
-	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
-				    struct mlx5e_sq_param *param,
-				    u8 log_wq_size)
-{
-	void *sqc = param->sqc;
-	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-	mlx5e_build_sq_param_common(priv, param);
-
-	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
-	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
-
-	param->type = MLX5E_SQ_ICO;
-}
-
-static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
-				    struct mlx5e_sq_param *param)
-{
-	void *sqc = param->sqc;
-	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
-
-	mlx5e_build_sq_param_common(priv, param);
-	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
-
-	param->max_inline = priv->params.tx_max_inline;
-	/* For now, XDP SQs support only L2 inline mode */
-	param->min_inline_mode = MLX5_INLINE_MODE_NONE;
-	param->type = MLX5E_SQ_XDP;
-}
-
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
-{
-	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-
-	mlx5e_build_rq_param(priv, &cparam->rq);
-	mlx5e_build_sq_param(priv, &cparam->sq);
-	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
-	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
-	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
-	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
-	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
-}
-
-static int mlx5e_open_channels(struct mlx5e_priv *priv)
-{
-	struct mlx5e_channel_param *cparam;
-	int nch = priv->params.num_channels;
-	int err = -ENOMEM;
-	int i;
-	int j;
-
-	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
-				GFP_KERNEL);
-
-	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
-				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
-
-	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
-
-	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
-		goto err_free_txq_to_sq_map;
-
-	mlx5e_build_channel_param(priv, cparam);
-
-	for (i = 0; i < nch; i++) {
-		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
-		if (err)
-			goto err_close_channels;
-	}
-
-	for (j = 0; j < nch; j++) {
-		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
-		if (err)
-			goto err_close_channels;
-	}
-
-	/* FIXME: This is a workaround for the tx timeout watchdog false
-	 * alarm raised when polling for inactive tx queues.
-	 */
-	netif_tx_start_all_queues(priv->netdev);
-
-	kfree(cparam);
-	return 0;
-
-err_close_channels:
-	for (i--; i >= 0; i--)
-		mlx5e_close_channel(priv->channel[i]);
-
-err_free_txq_to_sq_map:
-	kfree(priv->txq_to_sq_map);
-	kfree(priv->channel);
-	kfree(cparam);
-
-	return err;
-}
-
-static void mlx5e_close_channels(struct mlx5e_priv *priv)
-{
-	int i;
-
-	/* FIXME: This is a workaround only for the tx timeout watchdog
-	 * false alarm raised when polling for inactive tx queues.
-	 */
-	netif_tx_stop_all_queues(priv->netdev);
-	netif_tx_disable(priv->netdev);
-
-	for (i = 0; i < priv->params.num_channels; i++)
-		mlx5e_close_channel(priv->channel[i]);
-
-	kfree(priv->txq_to_sq_map);
-	kfree(priv->channel);
-}
-
-static int mlx5e_rx_hash_fn(int hfunc)
-{
-	return (hfunc == ETH_RSS_HASH_TOP) ?
-	       MLX5_RX_HASH_FN_TOEPLITZ :
-	       MLX5_RX_HASH_FN_INVERTED_XOR8;
-}
-
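-/* Reverse the low 'size' bits of 'a', e.g. 0b0001 -> 0b1000 for size == 4 */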
-static int mlx5e_bits_invert(unsigned long a, int size)
-{
-	int inv = 0;
-	int i;
-
-	for (i = 0; i < size; i++)
-		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
-
-	return inv;
-}
-
-static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
-		int ix = i;
-		u32 rqn;
-
-		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
-			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
-
-		ix = priv->params.indirection_rqt[ix];
-		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-				priv->channel[ix]->rq.rqn :
-				priv->drop_rq.rqn;
-		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
-	}
-}
-
-static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
-				      int ix)
-{
-	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
-			priv->channel[ix]->rq.rqn :
-			priv->drop_rq.rqn;
-
-	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
-}
-
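-/* Create an RQ table (RQT) of 'sz' entries: sz > 1 builds the RSS
- * indirection table, sz == 1 builds a direct table for channel 'ix'.
- */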
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
-			    int ix, struct mlx5e_rqt *rqt)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	void *rqtc;
-	int inlen;
-	int err;
-	u32 *in;
-
-	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
-
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
-
-	if (sz > 1) /* RSS */
-		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-	else
-		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
-
-	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
-	if (!err)
-		rqt->enabled = true;
-
-	kvfree(in);
-	return err;
-}
-
-void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
-{
-	rqt->enabled = false;
-	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
-}
-
-static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
-{
-	struct mlx5e_rqt *rqt = &priv->indir_rqt;
-
-	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
-}
-
-int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
-{
-	struct mlx5e_rqt *rqt;
-	int err;
-	int ix;
-
-	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
-		rqt = &priv->direct_tir[ix].rqt;
-		err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqt);
-		if (err)
-			goto err_destroy_rqts;
-	}
-
-	return 0;
-
-err_destroy_rqts:
-	for (ix--; ix >= 0; ix--)
-		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
-
-	return err;
-}
-
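-/* Modify an existing RQT in place, re-pointing its entries at the current
- * RQs (or at the drop RQ while closed) without destroying the table.
- */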
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	void *rqtc;
-	int inlen;
-	u32 *in;
-	int err;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
-
-	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-	if (sz > 1) /* RSS */
-		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-	else
-		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
-
-	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
-
-	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
-{
-	u32 rqtn;
-	int ix;
-
-	if (priv->indir_rqt.enabled) {
-		rqtn = priv->indir_rqt.rqtn;
-		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
-	}
-
-	for (ix = 0; ix < priv->params.num_channels; ix++) {
-		if (!priv->direct_tir[ix].rqt.enabled)
-			continue;
-		rqtn = priv->direct_tir[ix].rqt.rqtn;
-		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
-	}
-}
-
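-/* Enable HW LRO for IPv4 and IPv6 in a TIR context. The max IP payload size
- * is derived from the LRO WQE size minus room for the L2/L3 headers, in
- * 256-byte units (hence the '>> 8').
- */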
-static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
-{
-	if (!priv->params.lro_en)
-		return;
-
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
-	MLX5_SET(tirc, tirc, lro_enable_mask,
-		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
-		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
-	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
-		 (priv->params.lro_wqe_sz -
-		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
-	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
-}
-
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
-{
-	MLX5_SET(tirc, tirc, rx_hash_fn,
-		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
-	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
-		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
-					     rx_hash_toeplitz_key);
-		size_t len = MLX5_FLD_SZ_BYTES(tirc,
-					       rx_hash_toeplitz_key);
-
-		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
-		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
-	}
-}
-
-static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	void *in;
-	void *tirc;
-	int inlen;
-	int err;
-	int tt;
-	int ix;
-
-	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
-	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
-
-	mlx5e_build_tir_ctx_lro(tirc, priv);
-
-	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
-					   inlen);
-		if (err)
-			goto free_in;
-	}
-
-	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
-		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
-					   in, inlen);
-		if (err)
-			goto free_in;
-	}
-
-free_in:
-	kvfree(in);
-
-	return err;
-}
-
-static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
-	int err;
-
-	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
-	if (err)
-		return err;
-
-	/* Update vport context MTU */
-	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
-	return 0;
-}
-
-static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u16 hw_mtu = 0;
-	int err;
-
-	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
-	if (err || !hw_mtu) /* fall back to the port oper MTU */
-		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
-
-	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
-}
-
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	u16 mtu;
-	int err;
-
-	err = mlx5e_set_mtu(priv, netdev->mtu);
-	if (err)
-		return err;
-
-	mlx5e_query_mtu(priv, &mtu);
-	if (mtu != netdev->mtu)
-		netdev_warn(netdev, "%s: VPort MTU %d is different from netdev MTU %d\n",
-			    __func__, mtu, netdev->mtu);
-
-	netdev->mtu = mtu;
-	return 0;
-}
-
-static void mlx5e_netdev_set_tcs(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int nch = priv->params.num_channels;
-	int ntc = priv->params.num_tc;
-	int tc;
-
-	netdev_reset_tc(netdev);
-
-	if (ntc == 1)
-		return;
-
-	netdev_set_num_tc(netdev, ntc);
-
-	/* Map all netdev TCs to queue offset 0; we maintain our own
-	 * UP-to-TXQ mapping for QoS.
-	 */
-	for (tc = 0; tc < ntc; tc++)
-		netdev_set_tc_queue(netdev, tc, nch, 0);
-}
-
-int mlx5e_open_locked(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int num_txqs;
-	int err;
-
-	set_bit(MLX5E_STATE_OPENED, &priv->state);
-
-	mlx5e_netdev_set_tcs(netdev);
-
-	num_txqs = priv->params.num_channels * priv->params.num_tc;
-	netif_set_real_num_tx_queues(netdev, num_txqs);
-	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
-
-	err = mlx5e_open_channels(priv);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
-			   __func__, err);
-		goto err_clear_state_opened_flag;
-	}
-
-	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
-	if (err) {
-		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
-			   __func__, err);
-		goto err_close_channels;
-	}
-
-	mlx5e_redirect_rqts(priv);
-	mlx5e_update_carrier(priv);
-	mlx5e_timestamp_init(priv);
-#ifdef CONFIG_RFS_ACCEL
-	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
-#endif
-	if (priv->profile->update_stats)
-		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		err = mlx5e_add_sqs_fwd_rules(priv);
-		if (err)
-			goto err_close_channels;
-	}
-	return 0;
-
-err_close_channels:
-	mlx5e_close_channels(priv);
-err_clear_state_opened_flag:
-	clear_bit(MLX5E_STATE_OPENED, &priv->state);
-	return err;
-}
-
-int mlx5e_open(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	mutex_lock(&priv->state_lock);
-	err = mlx5e_open_locked(netdev);
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-int mlx5e_close_locked(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	/* The state may already be CLOSED if a previous configuration
-	 * operation (e.g. an RX/TX queue size change) that involves
-	 * close & open failed.
-	 */
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		return 0;
-
-	clear_bit(MLX5E_STATE_OPENED, &priv->state);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		mlx5e_remove_sqs_fwd_rules(priv);
-
-	mlx5e_timestamp_cleanup(priv);
-	netif_carrier_off(priv->netdev);
-	mlx5e_redirect_rqts(priv);
-	mlx5e_close_channels(priv);
-
-	return 0;
-}
-
-int mlx5e_close(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	if (!netif_device_present(netdev))
-		return -ENODEV;
-
-	mutex_lock(&priv->state_lock);
-	err = mlx5e_close_locked(netdev);
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
-				struct mlx5e_rq *rq,
-				struct mlx5e_rq_param *param)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	void *rqc = param->rqc;
-	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
-	int err;
-
-	param->wq.db_numa_node = param->wq.buf_numa_node;
-
-	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
-				&rq->wq_ctrl);
-	if (err)
-		return err;
-
-	rq->priv = priv;
-
-	return 0;
-}
-
-static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
-				struct mlx5e_cq *cq,
-				struct mlx5e_cq_param *param)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_core_cq *mcq = &cq->mcq;
-	int eqn_not_used;
-	unsigned int irqn;
-	int err;
-
-	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
-			       &cq->wq_ctrl);
-	if (err)
-		return err;
-
-	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
-
-	mcq->cqe_sz     = 64;
-	mcq->set_ci_db  = cq->wq_ctrl.db.db;
-	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
-	*mcq->set_ci_db = 0;
-	*mcq->arm_db    = 0;
-	mcq->vector     = param->eq_ix;
-	mcq->comp       = mlx5e_completion_event;
-	mcq->event      = mlx5e_cq_error_event;
-	mcq->irqn       = irqn;
-
-	cq->priv = priv;
-
-	return 0;
-}
-
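-/* The drop RQ is a minimal RQ that receives (and drops) traffic arriving
- * while the channels are closed; the RQTs are redirected to it in that state.
- */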
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
-{
-	struct mlx5e_cq_param cq_param;
-	struct mlx5e_rq_param rq_param;
-	struct mlx5e_rq *rq = &priv->drop_rq;
-	struct mlx5e_cq *cq = &priv->drop_rq.cq;
-	int err;
-
-	memset(&cq_param, 0, sizeof(cq_param));
-	memset(&rq_param, 0, sizeof(rq_param));
-	mlx5e_build_drop_rq_param(&rq_param);
-
-	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
-	if (err)
-		return err;
-
-	err = mlx5e_enable_cq(cq, &cq_param);
-	if (err)
-		goto err_destroy_cq;
-
-	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
-	if (err)
-		goto err_disable_cq;
-
-	err = mlx5e_enable_rq(rq, &rq_param);
-	if (err)
-		goto err_destroy_rq;
-
-	return 0;
-
-err_destroy_rq:
-	mlx5e_destroy_rq(&priv->drop_rq);
-
-err_disable_cq:
-	mlx5e_disable_cq(&priv->drop_rq.cq);
-
-err_destroy_cq:
-	mlx5e_destroy_cq(&priv->drop_rq.cq);
-
-	return err;
-}
-
-static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
-{
-	mlx5e_disable_rq(&priv->drop_rq);
-	mlx5e_destroy_rq(&priv->drop_rq);
-	mlx5e_disable_cq(&priv->drop_rq.cq);
-	mlx5e_destroy_cq(&priv->drop_rq.cq);
-}
-
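-/* Create one TIS (transport interface send) object per traffic class; all
- * SQs of a given TC are attached to its TIS.
- */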
-static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
-	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
-
-	MLX5_SET(tisc, tisc, prio, tc << 1);
-	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
-
-	if (mlx5_lag_is_lacp_owner(mdev))
-		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
-
-	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
-}
-
-static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
-{
-	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
-}
-
-int mlx5e_create_tises(struct mlx5e_priv *priv)
-{
-	int err;
-	int tc;
-
-	for (tc = 0; tc < priv->profile->max_tc; tc++) {
-		err = mlx5e_create_tis(priv, tc);
-		if (err)
-			goto err_close_tises;
-	}
-
-	return 0;
-
-err_close_tises:
-	for (tc--; tc >= 0; tc--)
-		mlx5e_destroy_tis(priv, tc);
-
-	return err;
-}
-
-void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
-{
-	int tc;
-
-	for (tc = 0; tc < priv->profile->max_tc; tc++)
-		mlx5e_destroy_tis(priv, tc);
-}
-
-static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
-				      enum mlx5e_traffic_types tt)
-{
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
-	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
-				 MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-				 MLX5_HASH_FIELD_SEL_DST_IP   |\
-				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
-	mlx5e_build_tir_ctx_lro(tirc, priv);
-
-	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_tir_ctx_hash(tirc, priv);
-
-	switch (tt) {
-	case MLX5E_TT_IPV4_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_TCP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_TCP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV6_UDP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-			 MLX5_L4_PROT_TYPE_UDP);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_L4PORTS);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_AH:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV6_IPSEC_ESP:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP_IPSEC_SPI);
-		break;
-
-	case MLX5E_TT_IPV4:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV4);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-
-	case MLX5E_TT_IPV6:
-		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-			 MLX5_L3_PROT_TYPE_IPV6);
-		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-			 MLX5_HASH_IP);
-		break;
-	default:
-		WARN_ONCE(true,
-			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-	}
-}
-
-static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
-				       u32 rqtn)
-{
-	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-
-	mlx5e_build_tir_ctx_lro(tirc, priv);
-
-	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-	MLX5_SET(tirc, tirc, indirect_table, rqtn);
-	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
-}
-
-static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
-{
-	struct mlx5e_tir *tir;
-	void *tirc;
-	int inlen;
-	int err;
-	u32 *in;
-	int tt;
-
-	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
-		memset(in, 0, inlen);
-		tir = &priv->indir_tir[tt];
-		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
-		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
-		if (err)
-			goto err_destroy_tirs;
-	}
-
-	kvfree(in);
-
-	return 0;
-
-err_destroy_tirs:
-	for (tt--; tt >= 0; tt--)
-		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
-
-	kvfree(in);
-
-	return err;
-}
-
-int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
-{
-	int nch = priv->profile->max_nch(priv->mdev);
-	struct mlx5e_tir *tir;
-	void *tirc;
-	int inlen;
-	int err;
-	u32 *in;
-	int ix;
-
-	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	for (ix = 0; ix < nch; ix++) {
-		memset(in, 0, inlen);
-		tir = &priv->direct_tir[ix];
-		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
-		mlx5e_build_direct_tir_ctx(priv, tirc,
-					   priv->direct_tir[ix].rqt.rqtn);
-		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
-		if (err)
-			goto err_destroy_ch_tirs;
-	}
-
-	kvfree(in);
-
-	return 0;
-
-err_destroy_ch_tirs:
-	for (ix--; ix >= 0; ix--)
-		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
-
-	kvfree(in);
-
-	return err;
-}
-
-static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
-{
-	int i;
-
-	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
-		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
-}
-
-void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
-{
-	int nch = priv->profile->max_nch(priv->mdev);
-	int i;
-
-	for (i = 0; i < nch; i++)
-		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
-}
-
-int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
-{
-	int err = 0;
-	int i;
-
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
-		return 0;
-
-	for (i = 0; i < priv->params.num_channels; i++) {
-		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool was_opened;
-	int err = 0;
-
-	if (tc && tc != MLX5E_MAX_NUM_TC)
-		return -EINVAL;
-
-	mutex_lock(&priv->state_lock);
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(priv->netdev);
-
-	priv->params.num_tc = tc ? tc : 1;
-
-	if (was_opened)
-		err = mlx5e_open_locked(priv->netdev);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
-			      __be16 proto, struct tc_to_netdev *tc)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
-		goto mqprio;
-
-	switch (tc->type) {
-	case TC_SETUP_CLSFLOWER:
-		switch (tc->cls_flower->command) {
-		case TC_CLSFLOWER_REPLACE:
-			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
-		case TC_CLSFLOWER_DESTROY:
-			return mlx5e_delete_flower(priv, tc->cls_flower);
-		case TC_CLSFLOWER_STATS:
-			return mlx5e_stats_flower(priv, tc->cls_flower);
-		}
-	default:
-		return -EOPNOTSUPP;
-	}
-
-mqprio:
-	if (tc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
-
-	return mlx5e_setup_tc(dev, tc->tc);
-}
-
-static struct rtnl_link_stats64 *
-mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
-	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
-	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
-
-	if (mlx5e_is_uplink_rep(priv)) {
-		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
-		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
-		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
-		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
-	} else {
-		stats->rx_packets = sstats->rx_packets;
-		stats->rx_bytes   = sstats->rx_bytes;
-		stats->tx_packets = sstats->tx_packets;
-		stats->tx_bytes   = sstats->tx_bytes;
-		stats->tx_dropped = sstats->tx_queue_dropped;
-	}
-
-	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
-
-	stats->rx_length_errors =
-		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
-		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
-		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
-	stats->rx_crc_errors =
-		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
-	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
-	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
-	stats->tx_carrier_errors =
-		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
-	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
-			   stats->rx_frame_errors;
-	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
-
-	/* The vport multicast counter also counts packets that are dropped
-	 * due to steering or RX out-of-buffer.
-	 */
-	stats->multicast =
-		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
-
-	return stats;
-}
-
-static void mlx5e_set_rx_mode(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	queue_work(priv->wq, &priv->set_rx_mode_work);
-}
-
-static int mlx5e_set_mac(struct net_device *netdev, void *addr)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct sockaddr *saddr = addr;
-
-	if (!is_valid_ether_addr(saddr->sa_data))
-		return -EADDRNOTAVAIL;
-
-	netif_addr_lock_bh(netdev);
-	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
-	netif_addr_unlock_bh(netdev);
-
-	queue_work(priv->wq, &priv->set_rx_mode_work);
-
-	return 0;
-}
-
-#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
-	do {						\
-		if (enable)				\
-			netdev->features |= feature;	\
-		else					\
-			netdev->features &= ~feature;	\
-	} while (0)
-
-typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
-
-static int set_feature_lro(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	int err;
-
-	mutex_lock(&priv->state_lock);
-
-	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
-		mlx5e_close_locked(priv->netdev);
-
-	priv->params.lro_en = enable;
-	err = mlx5e_modify_tirs_lro(priv);
-	if (err) {
-		netdev_err(netdev, "lro modify failed, %d\n", err);
-		priv->params.lro_en = !enable;
-	}
-
-	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
-		mlx5e_open_locked(priv->netdev);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (enable)
-		mlx5e_enable_vlan_filter(priv);
-	else
-		mlx5e_disable_vlan_filter(priv);
-
-	return 0;
-}
-
-static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (!enable && mlx5e_tc_num_filters(priv)) {
-		netdev_err(netdev,
-			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int set_feature_rx_all(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_set_port_fcs(mdev, !enable);
-}
-
-static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	mutex_lock(&priv->state_lock);
-
-	priv->params.vlan_strip_disable = !enable;
-	err = mlx5e_modify_rqs_vsd(priv, !enable);
-	if (err)
-		priv->params.vlan_strip_disable = enable;
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-#ifdef CONFIG_RFS_ACCEL
-static int set_feature_arfs(struct net_device *netdev, bool enable)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	int err;
-
-	if (enable)
-		err = mlx5e_arfs_enable(priv);
-	else
-		err = mlx5e_arfs_disable(priv);
-
-	return err;
-}
-#endif
-
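-/* Apply a single feature change through its handler, and mirror the new
- * state into netdev->features only if the handler succeeded.
- */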
-static int mlx5e_handle_feature(struct net_device *netdev,
-				netdev_features_t wanted_features,
-				netdev_features_t feature,
-				mlx5e_feature_handler feature_handler)
-{
-	netdev_features_t changes = wanted_features ^ netdev->features;
-	bool enable = !!(wanted_features & feature);
-	int err;
-
-	if (!(changes & feature))
-		return 0;
-
-	err = feature_handler(netdev, enable);
-	if (err) {
-		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
-			   enable ? "Enable" : "Disable", feature, err);
-		return err;
-	}
-
-	MLX5E_SET_FEATURE(netdev, feature, enable);
-	return 0;
-}
-
-static int mlx5e_set_features(struct net_device *netdev,
-			      netdev_features_t features)
-{
-	int err;
-
-	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
-				    set_feature_lro);
-	err |= mlx5e_handle_feature(netdev, features,
-				    NETIF_F_HW_VLAN_CTAG_FILTER,
-				    set_feature_vlan_filter);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
-				    set_feature_tc_num_filters);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
-				    set_feature_rx_all);
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
-				    set_feature_rx_vlan);
-#ifdef CONFIG_RFS_ACCEL
-	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
-				    set_feature_arfs);
-#endif
-
-	return err ? -EINVAL : 0;
-}
-
-static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	bool was_opened;
-	int err = 0;
-	bool reset;
-
-	mutex_lock(&priv->state_lock);
-
-	reset = !priv->params.lro_en &&
-		(priv->params.rq_wq_type !=
-		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened && reset)
-		mlx5e_close_locked(netdev);
-
-	netdev->mtu = new_mtu;
-	mlx5e_set_dev_port_mtu(netdev);
-
-	if (was_opened && reset)
-		err = mlx5e_open_locked(netdev);
-
-	mutex_unlock(&priv->state_lock);
-
-	return err;
-}
-
-static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	switch (cmd) {
-	case SIOCSHWTSTAMP:
-		return mlx5e_hwstamp_set(dev, ifr);
-	case SIOCGHWTSTAMP:
-		return mlx5e_hwstamp_get(dev, ifr);
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
-}
-
-static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
-			     __be16 vlan_proto)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	if (vlan_proto != htons(ETH_P_8021Q))
-		return -EPROTONOSUPPORT;
-
-	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
-					   vlan, qos);
-}
-
-static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
-}
-
-static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
-}
-
-static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
-			     int max_tx_rate)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	if (min_tx_rate)
-		return -EOPNOTSUPP;
-
-	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
-					   max_tx_rate);
-}
-
-static int mlx5_vport_link2ifla(u8 esw_link)
-{
-	switch (esw_link) {
-	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
-		return IFLA_VF_LINK_STATE_DISABLE;
-	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
-		return IFLA_VF_LINK_STATE_ENABLE;
-	}
-	return IFLA_VF_LINK_STATE_AUTO;
-}
-
-static int mlx5_ifla_link2vport(u8 ifla_link)
-{
-	switch (ifla_link) {
-	case IFLA_VF_LINK_STATE_DISABLE:
-		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
-	case IFLA_VF_LINK_STATE_ENABLE:
-		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
-	}
-	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
-}
-
-static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
-				   int link_state)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
-					    mlx5_ifla_link2vport(link_state));
-}
-
-static int mlx5e_get_vf_config(struct net_device *dev,
-			       int vf, struct ifla_vf_info *ivi)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err;
-
-	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
-	if (err)
-		return err;
-	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
-	return 0;
-}
-
-static int mlx5e_get_vf_stats(struct net_device *dev,
-			      int vf, struct ifla_vf_stats *vf_stats)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-
-	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
-					    vf_stats);
-}
-
-void mlx5e_add_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (!mlx5e_vxlan_allowed(priv->mdev))
-		return;
-
-	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
-}
-
-void mlx5e_del_vxlan_port(struct net_device *netdev,
-			  struct udp_tunnel_info *ti)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (!mlx5e_vxlan_allowed(priv->mdev))
-		return;
-
-	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
-}
-
-static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
-						    struct sk_buff *skb,
-						    netdev_features_t features)
-{
-	struct udphdr *udph;
-	u16 proto;
-	u16 port = 0;
-
-	switch (vlan_get_protocol(skb)) {
-	case htons(ETH_P_IP):
-		proto = ip_hdr(skb)->protocol;
-		break;
-	case htons(ETH_P_IPV6):
-		proto = ipv6_hdr(skb)->nexthdr;
-		break;
-	default:
-		goto out;
-	}
-
-	if (proto == IPPROTO_UDP) {
-		udph = udp_hdr(skb);
-		port = be16_to_cpu(udph->dest);
-	}
-
-	/* Check whether the UDP port is offloaded by the HW */
-	if (port && mlx5e_vxlan_lookup_port(priv, port))
-		return features;
-
-out:
-	/* Disable CSUM and GSO if the UDP dport is not offloaded by the HW */
-	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
-}
-
-static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
-					      struct net_device *netdev,
-					      netdev_features_t features)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	features = vlan_features_check(skb, features);
-	features = vxlan_features_check(skb, features);
-
-	/* Check whether the tunneled packet is offloaded by the HW */
-	if (skb->encapsulation &&
-	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
-		return mlx5e_vxlan_features_check(priv, skb, features);
-
-	return features;
-}
-
-static void mlx5e_tx_timeout(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	bool sched_work = false;
-	int i;
-
-	netdev_err(dev, "TX timeout detected\n");
-
-	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
-		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
-
-		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
-			continue;
-		sched_work = true;
-		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
-			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
-	}
-
-	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
-		schedule_work(&priv->tx_timeout_work);
-}
-
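-/* Attach or detach an XDP program. A full close/open cycle (which also
- * re-chooses the RQ type) is needed only when transitioning between "no
- * program" and "program attached"; swapping one program for another only
- * updates the per-channel RQ pointers.
- */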
-static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct bpf_prog *old_prog;
-	int err = 0;
-	bool reset, was_opened;
-	int i;
-
-	if (prog && prog->xdp_adjust_head) {
-		netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-		return -EOPNOTSUPP;
-	}
-
-	mutex_lock(&priv->state_lock);
-
-	if ((netdev->features & NETIF_F_LRO) && prog) {
-		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
-		err = -EINVAL;
-		goto unlock;
-	}
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	/* no need for full reset when exchanging programs */
-	reset = (!priv->xdp_prog || !prog);
-
-	if (was_opened && reset)
-		mlx5e_close_locked(netdev);
-	if (was_opened && !reset) {
-		/* num_channels is invariant here, so we can take the
-		 * batched reference right upfront.
-		 */
-		prog = bpf_prog_add(prog, priv->params.num_channels);
-		if (IS_ERR(prog)) {
-			err = PTR_ERR(prog);
-			goto unlock;
-		}
-	}
-
-	/* Exchange programs; we keep the extra prog reference we got from the
-	 * caller, since we don't fail from this point onwards.
-	 */
-	old_prog = xchg(&priv->xdp_prog, prog);
-	if (old_prog)
-		bpf_prog_put(old_prog);
-
-	if (reset) /* change RQ type according to priv->xdp_prog */
-		mlx5e_set_rq_priv_params(priv);
-
-	if (was_opened && reset)
-		mlx5e_open_locked(netdev);
-
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
-		goto unlock;
-
-	/* When exchanging programs without a reset, we update the ref counts
-	 * on behalf of the channels' RQs here.
-	 */
-	for (i = 0; i < priv->params.num_channels; i++) {
-		struct mlx5e_channel *c = priv->channel[i];
-
-		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
-		napi_synchronize(&c->napi);
-		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
-
-		old_prog = xchg(&c->rq.xdp_prog, prog);
-
-		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
-		/* napi_schedule in case we have missed anything */
-		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
-		napi_schedule(&c->napi);
-
-		if (old_prog)
-			bpf_prog_put(old_prog);
-	}
-
-unlock:
-	mutex_unlock(&priv->state_lock);
-	return err;
-}
-
-static bool mlx5e_xdp_attached(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	return !!priv->xdp_prog;
-}
-
-static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
-{
-	switch (xdp->command) {
-	case XDP_SETUP_PROG:
-		return mlx5e_xdp_set(dev, xdp->prog);
-	case XDP_QUERY_PROG:
-		xdp->prog_attached = mlx5e_xdp_attached(dev);
-		return 0;
-	default:
-		return -EINVAL;
-	}
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs without
- * re-enabling interrupts.
- */
-static void mlx5e_netpoll(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < priv->params.num_channels; i++)
-		napi_schedule(&priv->channel[i]->napi);
-}
-#endif
-
-static const struct net_device_ops mlx5e_netdev_ops_basic = {
-	.ndo_open                = mlx5e_open,
-	.ndo_stop                = mlx5e_close,
-	.ndo_start_xmit          = mlx5e_xmit,
-	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
-	.ndo_select_queue        = mlx5e_select_queue,
-	.ndo_get_stats64         = mlx5e_get_stats,
-	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
-	.ndo_set_mac_address     = mlx5e_set_mac,
-	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
-	.ndo_set_features        = mlx5e_set_features,
-	.ndo_change_mtu          = mlx5e_change_mtu,
-	.ndo_do_ioctl            = mlx5e_ioctl,
-	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
-#ifdef CONFIG_RFS_ACCEL
-	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
-#endif
-	.ndo_tx_timeout          = mlx5e_tx_timeout,
-	.ndo_xdp		 = mlx5e_xdp,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller     = mlx5e_netpoll,
-#endif
-};
-
-static const struct net_device_ops mlx5e_netdev_ops_sriov = {
-	.ndo_open                = mlx5e_open,
-	.ndo_stop                = mlx5e_close,
-	.ndo_start_xmit          = mlx5e_xmit,
-	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
-	.ndo_select_queue        = mlx5e_select_queue,
-	.ndo_get_stats64         = mlx5e_get_stats,
-	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
-	.ndo_set_mac_address     = mlx5e_set_mac,
-	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
-	.ndo_set_features        = mlx5e_set_features,
-	.ndo_change_mtu          = mlx5e_change_mtu,
-	.ndo_do_ioctl            = mlx5e_ioctl,
-	.ndo_udp_tunnel_add	 = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del	 = mlx5e_del_vxlan_port,
-	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
-	.ndo_features_check      = mlx5e_features_check,
-#ifdef CONFIG_RFS_ACCEL
-	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
-#endif
-	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
-	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
-	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
-	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
-	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
-	.ndo_get_vf_config       = mlx5e_get_vf_config,
-	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
-	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
-	.ndo_tx_timeout          = mlx5e_tx_timeout,
-	.ndo_xdp		 = mlx5e_xdp,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller     = mlx5e_netpoll,
-#endif
-	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
-	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
-};
-
-static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
-{
-	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return -ENOTSUPP;
-	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
-	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
-	    !MLX5_CAP_ETH(mdev, csum_cap) ||
-	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
-	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
-	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
-	    MLX5_CAP_FLOWTABLE(mdev,
-			       flow_table_properties_nic_receive.max_ft_level)
-			       < 3) {
-		mlx5_core_warn(mdev,
-			       "Not creating net device, some required device capabilities are missing\n");
-		return -ENOTSUPP;
-	}
-	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
-		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
-	if (!MLX5_CAP_GEN(mdev, cq_moderation))
-		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
-
-	return 0;
-}
-
-u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
-{
-	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
-
-	return bf_buf_size -
-	       sizeof(struct mlx5e_tx_wqe) +
-	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
-}
-
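-/* Spread the default indirection table round-robin over the channels,
- * capped by the number of cores on the device's NUMA node.
- */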
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-				   u32 *indirection_rqt, int len,
-				   int num_channels)
-{
-	int node = mdev->priv.numa_node;
-	int node_num_of_cores;
-	int i;
-
-	if (node == -1)
-		node = first_online_node;
-
-	node_num_of_cores = cpumask_weight(cpumask_of_node(node));
-
-	if (node_num_of_cores)
-		num_channels = min_t(int, num_channels, node_num_of_cores);
-
-	for (i = 0; i < len; i++)
-		indirection_rqt[i] = i % num_channels;
-}
-
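-/* Estimate the raw PCI bandwidth in Mbps from the negotiated link speed and
- * width (per-lane rate times lane count, ignoring encoding overhead).
- */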
-static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
-{
-	enum pcie_link_width width;
-	enum pci_bus_speed speed;
-	int err = 0;
-
-	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
-	if (err)
-		return err;
-
-	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
-		return -EINVAL;
-
-	switch (speed) {
-	case PCIE_SPEED_2_5GT:
-		*pci_bw = 2500 * width;
-		break;
-	case PCIE_SPEED_5_0GT:
-		*pci_bw = 5000 * width;
-		break;
-	case PCIE_SPEED_8_0GT:
-		*pci_bw = 8000 * width;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
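-/* Enable CQE compression by default only when the PCI link (below 40Gbps)
- * is slower than the network link, i.e. PCI is the likely bottleneck.
- */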
-static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
-{
-	return (link_speed && pci_bw &&
-		(pci_bw < 40000) && (pci_bw < link_speed));
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
-	params->rx_cq_period_mode = cq_period_mode;
-
-	params->rx_cq_moderation.pkts =
-		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
-	params->rx_cq_moderation.usec =
-		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
-
-	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
-		params->rx_cq_moderation.usec =
-			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-}
-
-static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
-				   u8 *min_inline_mode)
-{
-	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
-	case MLX5_CAP_INLINE_MODE_L2:
-		*min_inline_mode = MLX5_INLINE_MODE_L2;
-		break;
-	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
-		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
-		break;
-	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
-		*min_inline_mode = MLX5_INLINE_MODE_NONE;
-		break;
-	}
-}
-
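-/* Return the smallest device-supported LRO timer period that is at least
- * 'wanted_timeout', or the largest supported period if none is big enough.
- */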
-u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
-{
-	int i;
-
-	/* The supported periods are organized in ascending order */
-	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
-		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
-			break;
-
-	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
-}
-
-static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
-					struct net_device *netdev,
-					const struct mlx5e_profile *profile,
-					void *ppriv)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	u32 link_speed = 0;
-	u32 pci_bw = 0;
-	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
-					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
-	priv->mdev                         = mdev;
-	priv->netdev                       = netdev;
-	priv->params.num_channels          = profile->max_nch(mdev);
-	priv->profile                      = profile;
-	priv->ppriv                        = ppriv;
-
-	priv->params.lro_timeout =
-		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
-
-	priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
-
-	/* set CQE compression */
-	priv->params.rx_cqe_compress_def = false;
-	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
-	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		mlx5e_get_max_linkspeed(mdev, &link_speed);
-		mlx5e_get_pci_bw(mdev, &pci_bw);
-		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
-			      link_speed, pci_bw);
-		priv->params.rx_cqe_compress_def =
-			cqe_compress_heuristic(link_speed, pci_bw);
-	}
-
-	mlx5e_set_rq_priv_params(priv);
-	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		priv->params.lro_en = true;
-
-	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
-	priv->params.tx_cq_moderation.usec =
-		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
-	priv->params.tx_cq_moderation.pkts =
-		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
-	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
-	priv->params.num_tc                = 1;
-	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
-
-	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
-			    sizeof(priv->params.toeplitz_hash_key));
-
-	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
-				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
-
-	priv->params.lro_wqe_sz =
-		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
-		/* Extra room needed for build_skb */
-		MLX5_RX_HEADROOM -
-		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-	/* Initialize pflags */
-	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
-			priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
-
-	mutex_init(&priv->state_lock);
-
-	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
-	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
-	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
-	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
-}
-
-static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
-	if (is_zero_ether_addr(netdev->dev_addr) &&
-	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
-		eth_hw_addr_random(netdev);
-		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
-	}
-}
-
-static const struct switchdev_ops mlx5e_switchdev_ops = {
-	.switchdev_port_attr_get	= mlx5e_attr_get,
-};
-
-static void mlx5e_build_nic_netdev(struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_core_dev *mdev = priv->mdev;
-	bool fcs_supported;
-	bool fcs_enabled;
-
-	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-		if (MLX5_CAP_GEN(mdev, qos))
-			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
-#endif
-	} else {
-		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
-	}
-
-	netdev->watchdog_timeo    = 15 * HZ;
-
-	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;
-
-	netdev->vlan_features    |= NETIF_F_SG;
-	netdev->vlan_features    |= NETIF_F_IP_CSUM;
-	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
-	netdev->vlan_features    |= NETIF_F_GRO;
-	netdev->vlan_features    |= NETIF_F_TSO;
-	netdev->vlan_features    |= NETIF_F_TSO6;
-	netdev->vlan_features    |= NETIF_F_RXCSUM;
-	netdev->vlan_features    |= NETIF_F_RXHASH;
-
-	if (!!MLX5_CAP_ETH(mdev, lro_cap))
-		netdev->vlan_features    |= NETIF_F_LRO;
-
-	netdev->hw_features       = netdev->vlan_features;
-	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
-	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
-	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-	if (mlx5e_vxlan_allowed(mdev)) {
-		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
-					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
-					   NETIF_F_GSO_PARTIAL;
-		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
-		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
-		netdev->hw_enc_features |= NETIF_F_TSO;
-		netdev->hw_enc_features |= NETIF_F_TSO6;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
-					   NETIF_F_GSO_PARTIAL;
-		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
-	}
-
-	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
-
-	if (fcs_supported)
-		netdev->hw_features |= NETIF_F_RXALL;
-
-	netdev->features          = netdev->hw_features;
-	if (!priv->params.lro_en)
-		netdev->features  &= ~NETIF_F_LRO;
-
-	if (fcs_enabled)
-		netdev->features  &= ~NETIF_F_RXALL;
-
-#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
-	if (FT_CAP(flow_modify_en) &&
-	    FT_CAP(modify_root) &&
-	    FT_CAP(identified_miss_table_mode) &&
-	    FT_CAP(flow_table_modify)) {
-		netdev->hw_features      |= NETIF_F_HW_TC;
-#ifdef CONFIG_RFS_ACCEL
-		netdev->hw_features	 |= NETIF_F_NTUPLE;
-#endif
-	}
-
-	netdev->features         |= NETIF_F_HIGHDMA;
-
-	netdev->priv_flags       |= IFF_UNICAST_FLT;
-
-	mlx5e_set_netdev_dev_addr(netdev);
-
-#ifdef CONFIG_NET_SWITCHDEV
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		netdev->switchdev_ops = &mlx5e_switchdev_ops;
-#endif
-}
-
-static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err;
-
-	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
-	if (err) {
-		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
-		priv->q_counter = 0;
-	}
-}
-
-static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
-{
-	if (!priv->q_counter)
-		return;
-
-	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
-}
-
-static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
-			   struct net_device *netdev,
-			   const struct mlx5e_profile *profile,
-			   void *ppriv)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
-	mlx5e_build_nic_netdev(netdev);
-	mlx5e_vxlan_init(priv);
-}
-
-static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
-	mlx5e_vxlan_cleanup(priv);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		mlx5_eswitch_unregister_vport_rep(esw, 0);
-
-	if (priv->xdp_prog)
-		bpf_prog_put(priv->xdp_prog);
-}
-
-static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int err;
-	int i;
-
-	err = mlx5e_create_indirect_rqts(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5e_create_direct_rqts(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
-		goto err_destroy_indirect_rqts;
-	}
-
-	err = mlx5e_create_indirect_tirs(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
-		goto err_destroy_direct_rqts;
-	}
-
-	err = mlx5e_create_direct_tirs(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
-		goto err_destroy_indirect_tirs;
-	}
-
-	err = mlx5e_create_flow_steering(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
-		goto err_destroy_direct_tirs;
-	}
-
-	err = mlx5e_tc_init(priv);
-	if (err)
-		goto err_destroy_flow_steering;
-
-	return 0;
-
-err_destroy_flow_steering:
-	mlx5e_destroy_flow_steering(priv);
-err_destroy_direct_tirs:
-	mlx5e_destroy_direct_tirs(priv);
-err_destroy_indirect_tirs:
-	mlx5e_destroy_indirect_tirs(priv);
-err_destroy_direct_rqts:
-	for (i = 0; i < priv->profile->max_nch(mdev); i++)
-		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
-err_destroy_indirect_rqts:
-	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
-	return err;
-}
-
-static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
-{
-	int i;
-
-	mlx5e_tc_cleanup(priv);
-	mlx5e_destroy_flow_steering(priv);
-	mlx5e_destroy_direct_tirs(priv);
-	mlx5e_destroy_indirect_tirs(priv);
-	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
-		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
-	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
-}
-
-static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
-{
-	int err;
-
-	err = mlx5e_create_tises(priv);
-	if (err) {
-		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
-		return err;
-	}
-
-#ifdef CONFIG_MLX5_CORE_EN_DCB
-	mlx5e_dcbnl_initialize(priv);
-#endif
-	return 0;
-}
-
-static void mlx5e_nic_enable(struct mlx5e_priv *priv)
-{
-	struct net_device *netdev = priv->netdev;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep rep;
-
-	mlx5_lag_add(mdev, netdev);
-
-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
-	mlx5e_enable_async_events(priv);
-	queue_work(priv->wq, &priv->set_rx_mode_work);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-		rep.load = mlx5e_nic_rep_load;
-		rep.unload = mlx5e_nic_rep_unload;
-		rep.vport = FDB_UPLINK_VPORT;
-		rep.netdev = netdev;
-		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
-	}
-}
-
-static void mlx5e_nic_disable(struct mlx5e_priv *priv)
-{
-	queue_work(priv->wq, &priv->set_rx_mode_work);
-	mlx5e_disable_async_events(priv);
-	mlx5_lag_remove(priv->mdev);
-}
-
-static const struct mlx5e_profile mlx5e_nic_profile = {
-	.init		   = mlx5e_nic_init,
-	.cleanup	   = mlx5e_nic_cleanup,
-	.init_rx	   = mlx5e_init_nic_rx,
-	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
-	.init_tx	   = mlx5e_init_nic_tx,
-	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
-	.enable		   = mlx5e_nic_enable,
-	.disable	   = mlx5e_nic_disable,
-	.update_stats	   = mlx5e_update_stats,
-	.max_nch	   = mlx5e_get_max_num_channels,
-	.max_tc		   = MLX5E_MAX_NUM_TC,
-};
-
-struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
-				       const struct mlx5e_profile *profile,
-				       void *ppriv)
-{
-	int nch = profile->max_nch(mdev);
-	struct net_device *netdev;
-	struct mlx5e_priv *priv;
-
-	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
-				    nch * profile->max_tc,
-				    nch);
-	if (!netdev) {
-		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
-		return NULL;
-	}
-
-	profile->init(mdev, netdev, profile, ppriv);
-
-	netif_carrier_off(netdev);
-
-	priv = netdev_priv(netdev);
-
-	priv->wq = create_singlethread_workqueue("mlx5e");
-	if (!priv->wq)
-		goto err_cleanup_nic;
-
-	return netdev;
-
-err_cleanup_nic:
-	profile->cleanup(priv);
-	free_netdev(netdev);
-
-	return NULL;
-}
-
-int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
-{
-	const struct mlx5e_profile *profile;
-	struct mlx5e_priv *priv;
-	u16 max_mtu;
-	int err;
-
-	priv = netdev_priv(netdev);
-	profile = priv->profile;
-	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
-
-	err = profile->init_tx(priv);
-	if (err)
-		goto out;
-
-	err = mlx5e_open_drop_rq(priv);
-	if (err) {
-		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-		goto err_cleanup_tx;
-	}
-
-	err = profile->init_rx(priv);
-	if (err)
-		goto err_close_drop_rq;
-
-	mlx5e_create_q_counter(priv);
-
-	mlx5e_init_l2_addr(priv);
-
-	/* MTU range: 68 - hw-specific max */
-	netdev->min_mtu = ETH_MIN_MTU;
-	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
-	netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
-
-	mlx5e_set_dev_port_mtu(netdev);
-
-	if (profile->enable)
-		profile->enable(priv);
-
-	rtnl_lock();
-	if (netif_running(netdev))
-		mlx5e_open(netdev);
-	netif_device_attach(netdev);
-	rtnl_unlock();
-
-	return 0;
-
-err_close_drop_rq:
-	mlx5e_close_drop_rq(priv);
-
-err_cleanup_tx:
-	profile->cleanup_tx(priv);
-
-out:
-	return err;
-}
-
-static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
-{
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
-	int vport;
-	u8 mac[ETH_ALEN];
-
-	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
-		return;
-
-	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
-
-	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep rep;
-
-		rep.load = mlx5e_vport_rep_load;
-		rep.unload = mlx5e_vport_rep_unload;
-		rep.vport = vport;
-		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
-	}
-}
-
-void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	const struct mlx5e_profile *profile = priv->profile;
-
-	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
-	if (profile->disable)
-		profile->disable(priv);
-
-	flush_workqueue(priv->wq);
-
-	rtnl_lock();
-	if (netif_running(netdev))
-		mlx5e_close(netdev);
-	netif_device_detach(netdev);
-	rtnl_unlock();
-
-	mlx5e_destroy_q_counter(priv);
-	profile->cleanup_rx(priv);
-	mlx5e_close_drop_rq(priv);
-	profile->cleanup_tx(priv);
-	cancel_delayed_work_sync(&priv->update_stats_work);
-}
-
-/* The scope of mlx5e_attach and mlx5e_detach is limited to creating and
- * destroying hardware contexts and connecting them to the current netdev.
- */
-static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
-{
-	struct mlx5e_priv *priv = vpriv;
-	struct net_device *netdev = priv->netdev;
-	int err;
-
-	if (netif_device_present(netdev))
-		return 0;
-
-	err = mlx5e_create_mdev_resources(mdev);
-	if (err)
-		return err;
-
-	err = mlx5e_attach_netdev(mdev, netdev);
-	if (err) {
-		mlx5e_destroy_mdev_resources(mdev);
-		return err;
-	}
-
-	return 0;
-}
-
-static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
-{
-	struct mlx5e_priv *priv = vpriv;
-	struct net_device *netdev = priv->netdev;
-
-	if (!netif_device_present(netdev))
-		return;
-
-	mlx5e_detach_netdev(mdev, netdev);
-	mlx5e_destroy_mdev_resources(mdev);
-}
-
-static void *mlx5e_add(struct mlx5_core_dev *mdev)
-{
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
-	void *ppriv = NULL;
-	void *priv;
-	int vport;
-	int err;
-	struct net_device *netdev;
-
-	err = mlx5e_check_required_hca_cap(mdev);
-	if (err)
-		return NULL;
-
-	mlx5e_register_vport_rep(mdev);
-
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
-		ppriv = &esw->offloads.vport_reps[0];
-
-	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
-	if (!netdev) {
-		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
-		goto err_unregister_reps;
-	}
-
-	priv = netdev_priv(netdev);
-
-	err = mlx5e_attach(mdev, priv);
-	if (err) {
-		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
-		goto err_destroy_netdev;
-	}
-
-	err = register_netdev(netdev);
-	if (err) {
-		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
-		goto err_detach;
-	}
-
-	return priv;
-
-err_detach:
-	mlx5e_detach(mdev, priv);
-
-err_destroy_netdev:
-	mlx5e_destroy_netdev(mdev, priv);
-
-err_unregister_reps:
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
-
-	return NULL;
-}
-
-void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
-{
-	const struct mlx5e_profile *profile = priv->profile;
-	struct net_device *netdev = priv->netdev;
-
-	destroy_workqueue(priv->wq);
-	if (profile->cleanup)
-		profile->cleanup(priv);
-	free_netdev(netdev);
-}
-
-static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
-{
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
-	struct mlx5e_priv *priv = vpriv;
-	int vport;
-
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
-
-	unregister_netdev(priv->netdev);
-	mlx5e_detach(mdev, vpriv);
-	mlx5e_destroy_netdev(mdev, priv);
-}
-
-static void *mlx5e_get_netdev(void *vpriv)
-{
-	struct mlx5e_priv *priv = vpriv;
-
-	return priv->netdev;
-}
-
-static struct mlx5_interface mlx5e_interface = {
-	.add       = mlx5e_add,
-	.remove    = mlx5e_remove,
-	.attach    = mlx5e_attach,
-	.detach    = mlx5e_detach,
-	.event     = mlx5e_async_event,
-	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
-	.get_dev   = mlx5e_get_netdev,
-};
-
-void mlx5e_init(void)
-{
-	mlx5e_build_ptys2ethtool_map();
-	mlx5_register_interface(&mlx5e_interface);
-}
-
-void mlx5e_cleanup(void)
-{
-	mlx5_unregister_interface(&mlx5e_interface);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
deleted file mode 100644
index 8503788..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ /dev/null
@@ -1,604 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <generated/utsrelease.h>
-#include <linux/mlx5/fs.h>
-#include <net/switchdev.h>
-#include <net/pkt_cls.h>
-
-#include "eswitch.h"
-#include "en.h"
-#include "en_tc.h"
-
-static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
-
-static void mlx5e_rep_get_drvinfo(struct net_device *dev,
-				  struct ethtool_drvinfo *drvinfo)
-{
-	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
-		sizeof(drvinfo->driver));
-	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
-}
-
-static const struct counter_desc sw_rep_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
-};
-
-#define NUM_VPORT_REP_COUNTERS	ARRAY_SIZE(sw_rep_stats_desc)
-
-static void mlx5e_rep_get_strings(struct net_device *dev,
-				  u32 stringset, uint8_t *data)
-{
-	int i;
-
-	switch (stringset) {
-	case ETH_SS_STATS:
-		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
-			strcpy(data + (i * ETH_GSTRING_LEN),
-			       sw_rep_stats_desc[i].format);
-		break;
-	}
-}
-
-static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct rtnl_link_stats64 *vport_stats;
-	struct ifla_vf_stats vf_stats;
-	int err;
-
-	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
-	if (err) {
-		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
-		return;
-	}
-
-	vport_stats = &priv->stats.vf_vport;
-	/* flip tx/rx as we are reporting the counters for the switch vport */
-	vport_stats->rx_packets = vf_stats.tx_packets;
-	vport_stats->rx_bytes   = vf_stats.tx_bytes;
-	vport_stats->tx_packets = vf_stats.rx_packets;
-	vport_stats->tx_bytes   = vf_stats.rx_bytes;
-}
-
-static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_sw_stats *s = &priv->stats.sw;
-	struct mlx5e_rq_stats *rq_stats;
-	struct mlx5e_sq_stats *sq_stats;
-	int i, j;
-
-	memset(s, 0, sizeof(*s));
-	for (i = 0; i < priv->params.num_channels; i++) {
-		rq_stats = &priv->channel[i]->rq.stats;
-
-		s->rx_packets	+= rq_stats->packets;
-		s->rx_bytes	+= rq_stats->bytes;
-
-		for (j = 0; j < priv->params.num_tc; j++) {
-			sq_stats = &priv->channel[i]->sq[j].stats;
-
-			s->tx_packets		+= sq_stats->packets;
-			s->tx_bytes		+= sq_stats->bytes;
-		}
-	}
-}
-
-static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
-{
-	mlx5e_rep_update_sw_counters(priv);
-	mlx5e_rep_update_hw_counters(priv);
-}
-
-static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
-					struct ethtool_stats *stats, u64 *data)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int i;
-
-	if (!data)
-		return;
-
-	mutex_lock(&priv->state_lock);
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_rep_update_sw_counters(priv);
-	mutex_unlock(&priv->state_lock);
-
-	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
-		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
-					       sw_rep_stats_desc, i);
-}
-
-static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
-{
-	switch (sset) {
-	case ETH_SS_STATS:
-		return NUM_VPORT_REP_COUNTERS;
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
-	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
-	.get_link	   = ethtool_op_get_link,
-	.get_strings       = mlx5e_rep_get_strings,
-	.get_sset_count    = mlx5e_rep_get_sset_count,
-	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
-};
-
-int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	switch (attr->id) {
-	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
-		attr->u.ppid.id_len = ETH_ALEN;
-		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	return 0;
-}
-
-int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5e_channel *c;
-	int n, tc, err, num_sqs = 0;
-	u16 *sqs;
-
-	sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
-	if (!sqs)
-		return -ENOMEM;
-
-	for (n = 0; n < priv->params.num_channels; n++) {
-		c = priv->channel[n];
-		for (tc = 0; tc < c->num_tc; tc++)
-			sqs[num_sqs++] = c->sq[tc].sqn;
-	}
-
-	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
-
-	kfree(sqs);
-	return err;
-}
-
-int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
-{
-	struct net_device *netdev = rep->netdev;
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		return mlx5e_add_sqs_fwd_rules(priv);
-	return 0;
-}
-
-void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-}
-
-void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
-			  struct mlx5_eswitch_rep *rep)
-{
-	struct net_device *netdev = rep->netdev;
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
-		mlx5e_remove_sqs_fwd_rules(priv);
-
-	/* clean (and re-init) existing uplink offloaded TC rules */
-	mlx5e_tc_cleanup(priv);
-	mlx5e_tc_init(priv);
-}
-
-static int mlx5e_rep_open(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	int err;
-
-	err = mlx5e_open(dev);
-	if (err)
-		return err;
-
-	err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
-	if (!err)
-		netif_carrier_on(dev);
-
-	return 0;
-}
-
-static int mlx5e_rep_close(struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
-	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
-
-	return mlx5e_close(dev);
-}
-
-static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
-					char *buf, size_t len)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	int ret;
-
-	ret = snprintf(buf, len, "%d", rep->vport - 1);
-	if (ret >= len)
-		return -EOPNOTSUPP;
-
-	return 0;
-}
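The truncation check above leans on the C99 snprintf() contract: the return
value is the length the output would have had, so ret >= len means the buffer
was too small. The same test in isolation:

#include <stdio.h>

int main(void)
{
	char buf[4];
	int ret = snprintf(buf, sizeof(buf), "%d", 12345);

	/* ret is the untruncated length (5), not the bytes stored (3) */
	if (ret >= (int)sizeof(buf))
		printf("truncated: ret=%d buf=\"%s\"\n", ret, buf);
	return 0;
}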
-
-static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
-				  __be16 proto, struct tc_to_netdev *tc)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
-		return -EOPNOTSUPP;
-
-	if (tc->egress_dev) {
-		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-		struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
-
-		return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
-							    proto, tc);
-	}
-
-	switch (tc->type) {
-	case TC_SETUP_CLSFLOWER:
-		switch (tc->cls_flower->command) {
-		case TC_CLSFLOWER_REPLACE:
-			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
-		case TC_CLSFLOWER_DESTROY:
-			return mlx5e_delete_flower(priv, tc->cls_flower);
-		case TC_CLSFLOWER_STATS:
-			return mlx5e_stats_flower(priv, tc->cls_flower);
-		}
-	default:
-		return -EOPNOTSUPP;
-	}
-}
-
-bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
-	if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
-		return true;
-
-	return false;
-}
-
-bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
-
-	if (rep && rep->vport != FDB_UPLINK_VPORT)
-		return true;
-
-	return false;
-}
-
-bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	switch (attr_id) {
-	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
-		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
-			return true;
-	}
-
-	return false;
-}
-
-static int
-mlx5e_get_sw_stats64(const struct net_device *dev,
-		     struct rtnl_link_stats64 *stats)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
-
-	stats->rx_packets = sstats->rx_packets;
-	stats->rx_bytes   = sstats->rx_bytes;
-	stats->tx_packets = sstats->tx_packets;
-	stats->tx_bytes   = sstats->tx_bytes;
-
-	stats->tx_dropped = sstats->tx_queue_dropped;
-
-	return 0;
-}
-
-int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
-			    void *sp)
-{
-	switch (attr_id) {
-	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
-		return mlx5e_get_sw_stats64(dev, sp);
-	}
-
-	return -EINVAL;
-}
-
-static struct rtnl_link_stats64 *
-mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-
-	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
-	return stats;
-}
-
-static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
-	.switchdev_port_attr_get	= mlx5e_attr_get,
-};
-
-static const struct net_device_ops mlx5e_netdev_ops_rep = {
-	.ndo_open                = mlx5e_rep_open,
-	.ndo_stop                = mlx5e_rep_close,
-	.ndo_start_xmit          = mlx5e_xmit,
-	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
-	.ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
-	.ndo_get_stats64         = mlx5e_rep_get_stats,
-	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
-	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
-	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
-};
-
-static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
-					struct net_device *netdev,
-					const struct mlx5e_profile *profile,
-					void *ppriv)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
-					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
-					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
-	priv->params.log_sq_size           =
-		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
-	priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
-	priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
-
-	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
-					    BIT(priv->params.log_rq_size));
-
-	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
-	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
-
-	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-	priv->params.num_tc                = 1;
-
-	priv->params.lro_wqe_sz            =
-		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
-
-	priv->mdev                         = mdev;
-	priv->netdev                       = netdev;
-	priv->params.num_channels          = profile->max_nch(mdev);
-	priv->profile                      = profile;
-	priv->ppriv                        = ppriv;
-
-	mutex_init(&priv->state_lock);
-
-	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
-}
-
-static void mlx5e_build_rep_netdev(struct net_device *netdev)
-{
-	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
-
-	netdev->watchdog_timeo    = 15 * HZ;
-
-	netdev->ethtool_ops	  = &mlx5e_rep_ethtool_ops;
-
-#ifdef CONFIG_NET_SWITCHDEV
-	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
-#endif
-
-	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
-	netdev->hw_features      |= NETIF_F_HW_TC;
-
-	eth_hw_addr_random(netdev);
-}
-
-static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
-			   struct net_device *netdev,
-			   const struct mlx5e_profile *profile,
-			   void *ppriv)
-{
-	mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
-	mlx5e_build_rep_netdev(netdev);
-}
-
-static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	struct mlx5_flow_handle *flow_rule;
-	int err;
-	int i;
-
-	err = mlx5e_create_direct_rqts(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5e_create_direct_tirs(priv);
-	if (err) {
-		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
-		goto err_destroy_direct_rqts;
-	}
-
-	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
-						      rep->vport,
-						      priv->direct_tir[0].tirn);
-	if (IS_ERR(flow_rule)) {
-		err = PTR_ERR(flow_rule);
-		goto err_destroy_direct_tirs;
-	}
-	rep->vport_rx_rule = flow_rule;
-
-	err = mlx5e_tc_init(priv);
-	if (err)
-		goto err_del_flow_rule;
-
-	return 0;
-
-err_del_flow_rule:
-	mlx5_del_flow_rules(rep->vport_rx_rule);
-err_destroy_direct_tirs:
-	mlx5e_destroy_direct_tirs(priv);
-err_destroy_direct_rqts:
-	for (i = 0; i < priv->params.num_channels; i++)
-		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
-	return err;
-}
-
-static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
-{
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	int i;
-
-	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rules(rep->vport_rx_rule);
-	mlx5e_destroy_direct_tirs(priv);
-	for (i = 0; i < priv->params.num_channels; i++)
-		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
-}
-
-static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
-{
-	int err;
-
-	err = mlx5e_create_tises(priv);
-	if (err) {
-		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
-		return err;
-	}
-	return 0;
-}
-
-static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
-{
-#define	MLX5E_PORT_REPRESENTOR_NCH 1
-	return MLX5E_PORT_REPRESENTOR_NCH;
-}
-
-static struct mlx5e_profile mlx5e_rep_profile = {
-	.init			= mlx5e_init_rep,
-	.init_rx		= mlx5e_init_rep_rx,
-	.cleanup_rx		= mlx5e_cleanup_rep_rx,
-	.init_tx		= mlx5e_init_rep_tx,
-	.cleanup_tx		= mlx5e_cleanup_nic_tx,
-	.update_stats           = mlx5e_rep_update_stats,
-	.max_nch		= mlx5e_get_rep_max_num_channels,
-	.max_tc			= 1,
-};
-
-int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
-			 struct mlx5_eswitch_rep *rep)
-{
-	struct net_device *netdev;
-	int err;
-
-	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
-	if (!netdev) {
-		pr_warn("Failed to create representor netdev for vport %d\n",
-			rep->vport);
-		return -EINVAL;
-	}
-
-	rep->netdev = netdev;
-
-	err = mlx5e_attach_netdev(esw->dev, netdev);
-	if (err) {
-		pr_warn("Failed to attach representor netdev for vport %d\n",
-			rep->vport);
-		goto err_destroy_netdev;
-	}
-
-	err = register_netdev(netdev);
-	if (err) {
-		pr_warn("Failed to register representor netdev for vport %d\n",
-			rep->vport);
-		goto err_detach_netdev;
-	}
-
-	return 0;
-
-err_detach_netdev:
-	mlx5e_detach_netdev(esw->dev, netdev);
-
-err_destroy_netdev:
-	mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
-
-	return err;
-}
-
-void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
-			    struct mlx5_eswitch_rep *rep)
-{
-	struct net_device *netdev = rep->netdev;
-
-	unregister_netdev(netdev);
-	mlx5e_detach_netdev(esw->dev, netdev);
-	mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
deleted file mode 100644
index 0e2fb3e..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ /dev/null
@@ -1,966 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ip.h>
-#include <linux/ipv6.h>
-#include <linux/tcp.h>
-#include <net/busy_poll.h>
-#include "en.h"
-#include "en_tc.h"
-#include "eswitch.h"
-
-static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
-{
-	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
-}
-
-static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
-				       void *data)
-{
-	u32 ci = cqcc & cq->wq.sz_m1;
-
-	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
-}
-
-static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
-					 struct mlx5e_cq *cq, u32 cqcc)
-{
-	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
-	cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
-	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
-	rq->stats.cqe_compress_blks++;
-}
-
-static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
-{
-	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
-	cq->mini_arr_idx = 0;
-}
-
-static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
-{
-	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
-	u32 wq_sz = 1 << cq->wq.log_sz;
-	u32 ci = cqcc & cq->wq.sz_m1;
-	u32 ci_top = min_t(u32, wq_sz, ci + n);
-
-	for (; ci < ci_top; ci++, n--) {
-		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
-
-		cqe->op_own = op_own;
-	}
-
-	if (unlikely(ci == wq_sz)) {
-		op_own = !op_own;
-		for (ci = 0; ci < n; ci++) {
-			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
-
-			cqe->op_own = op_own;
-		}
-	}
-}
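mlx5e_cqes_update_owner() re-stamps the ownership bit on CQEs consumed during
a compressed session. The invariant it preserves: in a ring of 2^log_sz
entries, the owner bit expected at consumer counter cc is (cc >> log_sz) & 1,
flipping once per full lap, which is why the second loop flips op_own when
the index wraps. A stand-alone illustration of that invariant (8-entry ring
chosen only for brevity):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_SZ	3			/* 8-entry ring for the demo */
#define WQ_SZ	(1u << LOG_SZ)
#define SZ_M1	(WQ_SZ - 1)

/* owner bit expected at a monotonically increasing counter cc */
static uint8_t owner_bit(uint32_t cc)
{
	return (cc >> LOG_SZ) & 1;
}

int main(void)
{
	uint32_t cc;

	for (cc = 0; cc < 2 * WQ_SZ; cc++)
		printf("cc=%2u index=%u owner=%u\n",
		       cc, cc & SZ_M1, owner_bit(cc));

	/* the bit flips exactly when the ring index wraps to 0 */
	assert(owner_bit(WQ_SZ - 1) != owner_bit(WQ_SZ));
	return 0;
}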
-
-static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
-					struct mlx5e_cq *cq, u32 cqcc)
-{
-	u16 wqe_cnt_step;
-
-	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
-	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
-	cq->title.op_own      &= 0xf0;
-	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
-	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
-
-	wqe_cnt_step =
-		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
-		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
-	cq->decmprs_wqe_counter =
-		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
-}
-
-static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
-						struct mlx5e_cq *cq, u32 cqcc)
-{
-	mlx5e_decompress_cqe(rq, cq, cqcc);
-	cq->title.rss_hash_type   = 0;
-	cq->title.rss_hash_result = 0;
-}
-
-static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
-					     struct mlx5e_cq *cq,
-					     int update_owner_only,
-					     int budget_rem)
-{
-	u32 cqcc = cq->wq.cc + update_owner_only;
-	u32 cqe_count;
-	u32 i;
-
-	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
-
-	for (i = update_owner_only; i < cqe_count;
-	     i++, cq->mini_arr_idx++, cqcc++) {
-		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
-			mlx5e_read_mini_arr_slot(cq, cqcc);
-
-		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
-		rq->handle_rx_cqe(rq, &cq->title);
-	}
-	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
-	cq->wq.cc = cqcc;
-	cq->decmprs_left -= cqe_count;
-	rq->stats.cqe_compress_pkts += cqe_count;
-
-	return cqe_count;
-}
-
-static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
-					      struct mlx5e_cq *cq,
-					      int budget_rem)
-{
-	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
-	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
-	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
-	rq->handle_rx_cqe(rq, &cq->title);
-	cq->mini_arr_idx++;
-
-	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
-}
-
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
-{
-	bool was_opened;
-
-	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
-		return;
-
-	mutex_lock(&priv->state_lock);
-
-	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
-		goto unlock;
-
-	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-	if (was_opened)
-		mlx5e_close_locked(priv->netdev);
-
-	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
-
-	if (was_opened)
-		mlx5e_open_locked(priv->netdev);
-
-unlock:
-	mutex_unlock(&priv->state_lock);
-}
-
-#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
-
-static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
-				      struct mlx5e_dma_info *dma_info)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
-
-	if (tail_next == cache->head) {
-		rq->stats.cache_full++;
-		return false;
-	}
-
-	cache->page_cache[cache->tail] = *dma_info;
-	cache->tail = tail_next;
-	return true;
-}
-
-static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
-				      struct mlx5e_dma_info *dma_info)
-{
-	struct mlx5e_page_cache *cache = &rq->page_cache;
-
-	if (unlikely(cache->head == cache->tail)) {
-		rq->stats.cache_empty++;
-		return false;
-	}
-
-	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
-		rq->stats.cache_busy++;
-		return false;
-	}
-
-	*dma_info = cache->page_cache[cache->head];
-	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
-	rq->stats.cache_reuse++;
-
-	dma_sync_single_for_device(rq->pdev, dma_info->addr,
-				   RQ_PAGE_SIZE(rq),
-				   DMA_FROM_DEVICE);
-	return true;
-}
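The put/get pair above is a classic ring with one slot sacrificed to tell
full from empty: full when (tail + 1) & mask == head, empty when
head == tail, with power-of-two masking for the wrap. A minimal sketch of
the same indexing scheme (toy_* names, and an int payload standing in for
mlx5e_dma_info):

#include <stdbool.h>
#include <stdio.h>

#define CACHE_SIZE 8			/* must be a power of two */

struct toy_cache {
	int slots[CACHE_SIZE];
	unsigned int head, tail;
};

static bool toy_put(struct toy_cache *c, int v)
{
	unsigned int tail_next = (c->tail + 1) & (CACHE_SIZE - 1);

	if (tail_next == c->head)	/* full: one slot kept unused */
		return false;
	c->slots[c->tail] = v;
	c->tail = tail_next;
	return true;
}

static bool toy_get(struct toy_cache *c, int *v)
{
	if (c->head == c->tail)		/* empty */
		return false;
	*v = c->slots[c->head];
	c->head = (c->head + 1) & (CACHE_SIZE - 1);
	return true;
}

int main(void)
{
	struct toy_cache c = { .head = 0, .tail = 0 };
	int v, i;

	for (i = 0; i < 10; i++)	/* puts beyond capacity fail */
		printf("put %d -> %d\n", i, toy_put(&c, i));
	while (toy_get(&c, &v))
		printf("got %d\n", v);
	return 0;
}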
-
-static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
-					  struct mlx5e_dma_info *dma_info)
-{
-	struct page *page;
-
-	if (mlx5e_rx_cache_get(rq, dma_info))
-		return 0;
-
-	page = dev_alloc_pages(rq->buff.page_order);
-	if (unlikely(!page))
-		return -ENOMEM;
-
-	dma_info->page = page;
-	dma_info->addr = dma_map_page(rq->pdev, page, 0,
-				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
-	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
-		put_page(page);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
-			bool recycle)
-{
-	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
-		return;
-
-	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
-		       rq->buff.map_dir);
-	put_page(dma_info->page);
-}
-
-int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
-{
-	struct mlx5e_dma_info *di = &rq->dma_info[ix];
-
-	if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
-		return -ENOMEM;
-
-	wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
-	return 0;
-}
-
-void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
-{
-	struct mlx5e_dma_info *di = &rq->dma_info[ix];
-
-	mlx5e_page_release(rq, di, true);
-}
-
-static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
-{
-	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
-}
-
-static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
-					    struct sk_buff *skb,
-					    struct mlx5e_mpw_info *wi,
-					    u32 page_idx, u32 frag_offset,
-					    u32 len)
-{
-	unsigned int truesize =	ALIGN(len, rq->mpwqe_stride_sz);
-
-	dma_sync_single_for_cpu(rq->pdev,
-				wi->umr.dma_info[page_idx].addr + frag_offset,
-				len, DMA_FROM_DEVICE);
-	wi->skbs_frags[page_idx]++;
-	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
-			wi->umr.dma_info[page_idx].page, frag_offset,
-			len, truesize);
-}
-
-static inline void
-mlx5e_copy_skb_header_mpwqe(struct device *pdev,
-			    struct sk_buff *skb,
-			    struct mlx5e_mpw_info *wi,
-			    u32 page_idx, u32 offset,
-			    u32 headlen)
-{
-	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
-	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
-	unsigned int len;
-
-	/* Aligning len to sizeof(long) optimizes memcpy performance */
-	len = ALIGN(headlen_pg, sizeof(long));
-	dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
-				DMA_FROM_DEVICE);
-	skb_copy_to_linear_data_offset(skb, 0,
-				       page_address(dma_info->page) + offset,
-				       len);
-	if (unlikely(offset + headlen > PAGE_SIZE)) {
-		dma_info++;
-		headlen_pg = len;
-		len = ALIGN(headlen - headlen_pg, sizeof(long));
-		dma_sync_single_for_cpu(pdev, dma_info->addr, len,
-					DMA_FROM_DEVICE);
-		skb_copy_to_linear_data_offset(skb, headlen_pg,
-					       page_address(dma_info->page),
-					       len);
-	}
-}
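The copy above must handle a header that straddles a page boundary: the
first chunk is capped at PAGE_SIZE - offset and the remainder is taken from
offset 0 of the next page. The split arithmetic on its own (TOY_PAGE_SIZE
shrunk for readability):

#include <stdio.h>

#define TOY_PAGE_SIZE 64		/* illustration only */

/* split a copy of 'headlen' bytes starting at 'offset' across pages */
static void split_copy(unsigned int offset, unsigned int headlen)
{
	unsigned int first = headlen;

	if (first > TOY_PAGE_SIZE - offset)
		first = TOY_PAGE_SIZE - offset;	/* cap at page end */

	printf("page 0: copy %u bytes at offset %u\n", first, offset);
	if (offset + headlen > TOY_PAGE_SIZE)	/* spills over */
		printf("page 1: copy %u bytes at offset 0\n",
		       headlen - first);
}

int main(void)
{
	split_copy(10, 20);	/* fits in one page */
	split_copy(50, 20);	/* 14 bytes on page 0, 6 on page 1 */
	return 0;
}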
-
-static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
-{
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	struct mlx5e_sq *sq = &rq->channel->icosq;
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_umr_wqe *wqe;
-	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
-	u16 pi;
-
-	/* fill sq edge with nops to avoid wqe wrap around */
-	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-		sq->db.ico_wqe[pi].num_wqebbs = 1;
-		mlx5e_send_nop(sq, false);
-	}
-
-	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
-	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
-	wqe->ctrl.opmod_idx_opcode =
-		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
-			    MLX5_OPCODE_UMR);
-
-	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
-	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
-	sq->pc += num_wqebbs;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
-}
-
-static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
-				    struct mlx5e_rx_wqe *wqe,
-				    u16 ix)
-{
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
-	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-	int err;
-	int i;
-
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
-		err = mlx5e_page_alloc_mapped(rq, dma_info);
-		if (unlikely(err))
-			goto err_unmap;
-		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
-		page_ref_add(dma_info->page, pg_strides);
-		wi->skbs_frags[i] = 0;
-	}
-
-	wi->consumed_strides = 0;
-	wqe->data.addr = cpu_to_be64(dma_offset);
-
-	return 0;
-
-err_unmap:
-	while (--i >= 0) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
-		page_ref_sub(dma_info->page, pg_strides);
-		mlx5e_page_release(rq, dma_info, true);
-	}
-
-	return err;
-}
-
-void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
-{
-	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
-	int i;
-
-	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
-		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
-
-		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
-		mlx5e_page_release(rq, dma_info, true);
-	}
-}
-
-void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-
-	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
-
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
-		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
-		return;
-	}
-
-	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-
-	/* ensure wqes are visible to device before updating doorbell record */
-	dma_wmb();
-
-	mlx5_wq_ll_update_db_record(wq);
-}
-
-int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
-{
-	int err;
-
-	err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
-	if (unlikely(err))
-		return err;
-	set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
-	mlx5e_post_umr_wqe(rq, ix);
-	return -EBUSY;
-}
-
-void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
-{
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
-
-	mlx5e_free_rx_mpwqe(rq, wi);
-}
-
-#define RQ_CANNOT_POST(rq) \
-	(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
-	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
-
-bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
-{
-	struct mlx5_wq_ll *wq = &rq->wq;
-
-	if (unlikely(RQ_CANNOT_POST(rq)))
-		return false;
-
-	while (!mlx5_wq_ll_is_full(wq)) {
-		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
-		int err;
-
-		err = rq->alloc_wqe(rq, wqe, wq->head);
-		if (err == -EBUSY)
-			return true;
-		if (unlikely(err)) {
-			rq->stats.buff_alloc_err++;
-			break;
-		}
-
-		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
-	}
-
-	/* ensure wqes are visible to device before updating doorbell record */
-	dma_wmb();
-
-	mlx5_wq_ll_update_db_record(wq);
-
-	return !mlx5_wq_ll_is_full(wq);
-}
-
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
-				 u32 cqe_bcnt)
-{
-	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
-	struct iphdr	*ipv4;
-	struct ipv6hdr	*ipv6;
-	struct tcphdr	*tcp;
-	int network_depth = 0;
-	__be16 proto;
-	u16 tot_len;
-
-	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
-	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
-		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
-
-	skb->mac_len = ETH_HLEN;
-	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
-
-	ipv4 = (struct iphdr *)(skb->data + network_depth);
-	ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
-	tot_len = cqe_bcnt - network_depth;
-
-	if (proto == htons(ETH_P_IP)) {
-		tcp = (struct tcphdr *)(skb->data + network_depth +
-					sizeof(struct iphdr));
-		ipv6 = NULL;
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
-	} else {
-		tcp = (struct tcphdr *)(skb->data + network_depth +
-					sizeof(struct ipv6hdr));
-		ipv4 = NULL;
-		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
-	}
-
-	if (get_cqe_lro_tcppsh(cqe))
-		tcp->psh                = 1;
-
-	if (tcp_ack) {
-		tcp->ack                = 1;
-		tcp->ack_seq            = cqe->lro_ack_seq_num;
-		tcp->window             = cqe->lro_tcp_win;
-	}
-
-	if (ipv4) {
-		ipv4->ttl               = cqe->lro_min_ttl;
-		ipv4->tot_len           = cpu_to_be16(tot_len);
-		ipv4->check             = 0;
-		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
-						       ipv4->ihl);
-	} else {
-		ipv6->hop_limit         = cqe->lro_min_ttl;
-		ipv6->payload_len       = cpu_to_be16(tot_len -
-						      sizeof(struct ipv6hdr));
-	}
-}
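Because the LRO path rewrites tot_len and ttl, the IPv4 header checksum must
be refreshed, hence zeroing check before ip_fast_csum(), the kernel's
optimized RFC 1071 sum over the ihl 32-bit words. A portable reference
version of the same computation (not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* RFC 1071 one's-complement sum over an IPv4 header of ihl*4 bytes */
static uint16_t ip_checksum(const uint8_t *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 4; i += 2)	/* big-endian 16-bit words */
		sum += (uint32_t)(hdr[i] << 8 | hdr[i + 1]);
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* textbook sample header, checksum field (bytes 10-11) zeroed */
	const uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
		0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
		0xac, 0x10, 0x0a, 0x0c,
	};

	printf("checksum=0x%04x\n", ip_checksum(hdr, 5));	/* 0xb1e6 */
	return 0;
}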
-
-static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
-				      struct sk_buff *skb)
-{
-	u8 cht = cqe->rss_hash_type;
-	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
-		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
-					    PKT_HASH_TYPE_NONE;
-	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
-}
-
-static inline bool is_first_ethertype_ip(struct sk_buff *skb)
-{
-	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
-
-	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
-}
-
-static inline void mlx5e_handle_csum(struct net_device *netdev,
-				     struct mlx5_cqe64 *cqe,
-				     struct mlx5e_rq *rq,
-				     struct sk_buff *skb,
-				     bool   lro)
-{
-	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
-		goto csum_none;
-
-	if (lro) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		return;
-	}
-
-	if (is_first_ethertype_ip(skb)) {
-		skb->ip_summed = CHECKSUM_COMPLETE;
-		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
-		rq->stats.csum_complete++;
-		return;
-	}
-
-	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
-		   (cqe->hds_ip_ext & CQE_L4_OK))) {
-		skb->ip_summed = CHECKSUM_UNNECESSARY;
-		if (cqe_is_tunneled(cqe)) {
-			skb->csum_level = 1;
-			skb->encapsulation = 1;
-			rq->stats.csum_unnecessary_inner++;
-		}
-		return;
-	}
-csum_none:
-	skb->ip_summed = CHECKSUM_NONE;
-	rq->stats.csum_none++;
-}
-
-static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
-				      u32 cqe_bcnt,
-				      struct mlx5e_rq *rq,
-				      struct sk_buff *skb)
-{
-	struct net_device *netdev = rq->netdev;
-	struct mlx5e_tstamp *tstamp = rq->tstamp;
-	int lro_num_seg;
-
-	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
-	if (lro_num_seg > 1) {
-		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
-		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
-		rq->stats.lro_packets++;
-		rq->stats.lro_bytes += cqe_bcnt;
-	}
-
-	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
-		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
-
-	skb_record_rx_queue(skb, rq->ix);
-
-	if (likely(netdev->features & NETIF_F_RXHASH))
-		mlx5e_skb_set_hash(cqe, skb);
-
-	if (cqe_has_vlan(cqe))
-		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-				       be16_to_cpu(cqe->vlan_info));
-
-	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
-
-	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-	skb->protocol = eth_type_trans(skb, netdev);
-}
-
-static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
-					 struct mlx5_cqe64 *cqe,
-					 u32 cqe_bcnt,
-					 struct sk_buff *skb)
-{
-	rq->stats.packets++;
-	rq->stats.bytes += cqe_bcnt;
-	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
-}
-
-static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
-{
-	struct mlx5_wq_cyc *wq = &sq->wq;
-	struct mlx5e_tx_wqe *wqe;
-	u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
-
-	wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-
-	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
-}
-
-static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
-					struct mlx5e_dma_info *di,
-					unsigned int data_offset,
-					int len)
-{
-	struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
-	struct mlx5_wq_cyc       *wq   = &sq->wq;
-	u16                      pi    = sq->pc & wq->sz_m1;
-	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5e_sq_wqe_info *wi   = &sq->db.xdp.wqe_info[pi];
-
-	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-	struct mlx5_wqe_data_seg *dseg;
-
-	dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
-	unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
-	void *data           = page_address(di->page) + data_offset;
-
-	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
-		if (sq->db.xdp.doorbell) {
-			/* SQ is full, ring doorbell */
-			mlx5e_xmit_xdp_doorbell(sq);
-			sq->db.xdp.doorbell = false;
-		}
-		rq->stats.xdp_tx_full++;
-		mlx5e_page_release(rq, di, true);
-		return;
-	}
-
-	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
-				   PCI_DMA_TODEVICE);
-
-	memset(wqe, 0, sizeof(*wqe));
-
-	/* copy the inline part */
-	memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
-	eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
-
-	dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
-
-	/* write the dma part */
-	dseg->addr       = cpu_to_be64(dma_addr);
-	dseg->byte_count = cpu_to_be32(dma_len);
-	dseg->lkey       = sq->mkey_be;
-
-	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
-	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
-
-	sq->db.xdp.di[pi] = *di;
-	wi->opcode     = MLX5_OPCODE_SEND;
-	wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
-	sq->pc += MLX5E_XDP_TX_WQEBBS;
-
-	sq->db.xdp.doorbell = true;
-	rq->stats.xdp_tx++;
-}
-
-/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
-				    const struct bpf_prog *prog,
-				    struct mlx5e_dma_info *di,
-				    void *data, u16 len)
-{
-	struct xdp_buff xdp;
-	u32 act;
-
-	if (!prog)
-		return false;
-
-	xdp.data = data;
-	xdp.data_end = xdp.data + len;
-	act = bpf_prog_run_xdp(prog, &xdp);
-	switch (act) {
-	case XDP_PASS:
-		return false;
-	case XDP_TX:
-		mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
-		return true;
-	default:
-		bpf_warn_invalid_xdp_action(act);
-	case XDP_ABORTED:
-	case XDP_DROP:
-		rq->stats.xdp_drop++;
-		mlx5e_page_release(rq, di, true);
-		return true;
-	}
-}
-
-static inline
-struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
-			     u16 wqe_counter, u32 cqe_bcnt)
-{
-	struct mlx5e_dma_info *di;
-	struct sk_buff *skb;
-	void *va, *data;
-	bool consumed;
-
-	di             = &rq->dma_info[wqe_counter];
-	va             = page_address(di->page);
-	data           = va + MLX5_RX_HEADROOM;
-
-	dma_sync_single_range_for_cpu(rq->pdev,
-				      di->addr,
-				      MLX5_RX_HEADROOM,
-				      rq->buff.wqe_sz,
-				      DMA_FROM_DEVICE);
-	prefetch(data);
-
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-		rq->stats.wqe_err++;
-		mlx5e_page_release(rq, di, true);
-		return NULL;
-	}
-
-	rcu_read_lock();
-	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
-				    cqe_bcnt);
-	rcu_read_unlock();
-	if (consumed)
-		return NULL; /* page/packet was consumed by XDP */
-
-	skb = build_skb(va, RQ_PAGE_SIZE(rq));
-	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
-		mlx5e_page_release(rq, di, true);
-		return NULL;
-	}
-
-	/* queue up for recycling ... */
-	page_ref_inc(di->page);
-	mlx5e_page_release(rq, di, true);
-
-	skb_reserve(skb, MLX5_RX_HEADROOM);
-	skb_put(skb, cqe_bcnt);
-
-	return skb;
-}
-
-void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
-	struct mlx5e_rx_wqe *wqe;
-	__be16 wqe_counter_be;
-	struct sk_buff *skb;
-	u16 wqe_counter;
-	u32 cqe_bcnt;
-
-	wqe_counter_be = cqe->wqe_counter;
-	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
-	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
-
-	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
-	if (!skb)
-		goto wq_ll_pop;
-
-	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-	napi_gro_receive(rq->cq.napi, skb);
-
-wq_ll_pop:
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
-		       &wqe->next.next_wqe_index);
-}
-
-void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
-	struct net_device *netdev = rq->netdev;
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	struct mlx5e_rx_wqe *wqe;
-	struct sk_buff *skb;
-	__be16 wqe_counter_be;
-	u16 wqe_counter;
-	u32 cqe_bcnt;
-
-	wqe_counter_be = cqe->wqe_counter;
-	wqe_counter    = be16_to_cpu(wqe_counter_be);
-	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
-	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
-
-	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
-	if (!skb)
-		goto wq_ll_pop;
-
-	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-
-	if (rep->vlan && skb_vlan_tag_present(skb))
-		skb_vlan_pop(skb);
-
-	napi_gro_receive(rq->cq.napi, skb);
-
-wq_ll_pop:
-	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
-		       &wqe->next.next_wqe_index);
-}
-
-static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
-					   struct mlx5_cqe64 *cqe,
-					   struct mlx5e_mpw_info *wi,
-					   u32 cqe_bcnt,
-					   struct sk_buff *skb)
-{
-	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
-	u32 wqe_offset     = stride_ix * rq->mpwqe_stride_sz;
-	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
-	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
-	u32 head_page_idx  = page_idx;
-	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
-	u32 frag_offset    = head_offset + headlen;
-	u16 byte_cnt       = cqe_bcnt - headlen;
-
-	if (unlikely(frag_offset >= PAGE_SIZE)) {
-		page_idx++;
-		frag_offset -= PAGE_SIZE;
-	}
-
-	while (byte_cnt) {
-		u32 pg_consumed_bytes =
-			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
-
-		mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
-					 pg_consumed_bytes);
-		byte_cnt -= pg_consumed_bytes;
-		frag_offset = 0;
-		page_idx++;
-	}
-	/* copy header */
-	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
-				    head_offset, headlen);
-	/* skb linear part was allocated with headlen and aligned to long */
-	skb->tail += headlen;
-	skb->len  += headlen;
-}
-
-void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
-{
-	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
-	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
-	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
-	struct mlx5e_rx_wqe  *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
-	struct sk_buff *skb;
-	u16 cqe_bcnt;
-
-	wi->consumed_strides += cstrides;
-
-	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
-		rq->stats.wqe_err++;
-		goto mpwrq_cqe_out;
-	}
-
-	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
-		rq->stats.mpwqe_filler++;
-		goto mpwrq_cqe_out;
-	}
-
-	skb = napi_alloc_skb(rq->cq.napi,
-			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
-				   sizeof(long)));
-	if (unlikely(!skb)) {
-		rq->stats.buff_alloc_err++;
-		goto mpwrq_cqe_out;
-	}
-
-	prefetch(skb->data);
-	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
-
-	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
-	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
-	napi_gro_receive(rq->cq.napi, skb);
-
-mpwrq_cqe_out:
-	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
-		return;
-
-	mlx5e_free_rx_mpwqe(rq, wi);
-	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
-}
-
-int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
-{
-	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
-	struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
-	int work_done = 0;
-
-	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
-		return 0;
-
-	if (cq->decmprs_left)
-		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
-
-	for (; work_done < budget; work_done++) {
-		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
-
-		if (!cqe)
-			break;
-
-		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
-			work_done +=
-				mlx5e_decompress_cqes_start(rq, cq,
-							    budget - work_done);
-			continue;
-		}
-
-		mlx5_cqwq_pop(&cq->wq);
-
-		rq->handle_rx_cqe(rq, cqe);
-	}
-
-	if (xdp_sq->db.xdp.doorbell) {
-		mlx5e_xmit_xdp_doorbell(xdp_sq);
-		xdp_sq->db.xdp.doorbell = false;
-	}
-
-	mlx5_cqwq_update_db_record(&cq->wq);
-
-	/* ensure cq space is freed before enabling more cqes */
-	wmb();
-
-	return work_done;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
deleted file mode 100644
index 1fffe48..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-/* Adaptive moderation profiles */
-#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
-#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
-#define MLX5E_PARAMS_AM_NUM_PROFILES 5
-
-/* All profile array sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
-#define MLX5_AM_EQE_PROFILES { \
-	{1,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{8,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{64,  MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-	{256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define MLX5_AM_CQE_PROFILES { \
-	{2,  256},             \
-	{8,  128},             \
-	{16, 64},              \
-	{32, 64},              \
-	{64, 64}               \
-}
-
-static const struct mlx5e_cq_moder
-profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
-	MLX5_AM_EQE_PROFILES,
-	MLX5_AM_CQE_PROFILES,
-};
-
-static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
-{
-	return profile[cq_period_mode][ix];
-}
-
-struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
-{
-	int default_profile_ix;
-
-	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
-	else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
-		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
-
-	return profile[rx_cq_period_mode][default_profile_ix];
-}
-
-/* Adaptive moderation logic */
-enum {
-	MLX5E_AM_START_MEASURE,
-	MLX5E_AM_MEASURE_IN_PROGRESS,
-	MLX5E_AM_APPLY_NEW_PROFILE,
-};
-
-enum {
-	MLX5E_AM_PARKING_ON_TOP,
-	MLX5E_AM_PARKING_TIRED,
-	MLX5E_AM_GOING_RIGHT,
-	MLX5E_AM_GOING_LEFT,
-};
-
-enum {
-	MLX5E_AM_STATS_WORSE,
-	MLX5E_AM_STATS_SAME,
-	MLX5E_AM_STATS_BETTER,
-};
-
-enum {
-	MLX5E_AM_STEPPED,
-	MLX5E_AM_TOO_TIRED,
-	MLX5E_AM_ON_EDGE,
-};
-
-static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
-		return true;
-	case MLX5E_AM_GOING_RIGHT:
-		return (am->steps_left > 1) && (am->steps_right == 1);
-	default: /* MLX5E_AM_GOING_LEFT */
-		return (am->steps_right > 1) && (am->steps_left == 1);
-	}
-}
-
-static void mlx5e_am_turn(struct mlx5e_rx_am *am)
-{
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		am->tune_state = MLX5E_AM_GOING_LEFT;
-		am->steps_left = 0;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		am->tune_state = MLX5E_AM_GOING_RIGHT;
-		am->steps_right = 0;
-		break;
-	}
-}
-
-static int mlx5e_am_step(struct mlx5e_rx_am *am)
-{
-	if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
-		return MLX5E_AM_TOO_TIRED;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-	case MLX5E_AM_PARKING_TIRED:
-		WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
-		break;
-	case MLX5E_AM_GOING_RIGHT:
-		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix++;
-		am->steps_right++;
-		break;
-	case MLX5E_AM_GOING_LEFT:
-		if (am->profile_ix == 0)
-			return MLX5E_AM_ON_EDGE;
-		am->profile_ix--;
-		am->steps_left++;
-		break;
-	}
-
-	am->tired++;
-	return MLX5E_AM_STEPPED;
-}
-
-static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tired        = 0;
-	am->tune_state   = MLX5E_AM_PARKING_ON_TOP;
-}
-
-static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
-{
-	am->steps_right  = 0;
-	am->steps_left   = 0;
-	am->tune_state   = MLX5E_AM_PARKING_TIRED;
-}
-
-static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
-{
-	am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
-					  MLX5E_AM_GOING_RIGHT;
-	mlx5e_am_step(am);
-}
-
-static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
-				  struct mlx5e_rx_am_stats *prev)
-{
-	int diff;
-
-	if (!prev->ppms)
-		return curr->ppms ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_SAME;
-
-	diff = curr->ppms - prev->ppms;
-	if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
-		return (diff > 0) ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_WORSE;
-
-	if (!prev->epms)
-		return curr->epms ? MLX5E_AM_STATS_WORSE :
-				    MLX5E_AM_STATS_SAME;
-
-	diff = curr->epms - prev->epms;
-	if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
-		return (diff < 0) ? MLX5E_AM_STATS_BETTER :
-				    MLX5E_AM_STATS_WORSE;
-
-	return MLX5E_AM_STATS_SAME;
-}
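The comparison above treats anything within a 10% dead band of the previous
packets-per-millisecond rate as noise, and only then consults the
events-per-millisecond rate as a tiebreaker. The integer threshold test on
its own:

#include <stdio.h>
#include <stdlib.h>

/* -1/0/+1 for worse/same/better, with a 10% dead band */
static int rate_compare(unsigned int curr, unsigned int prev)
{
	int diff = (int)curr - (int)prev;

	if (!prev)
		return curr ? 1 : 0;
	if (100u * (unsigned int)abs(diff) / prev > 10)
		return diff > 0 ? 1 : -1;
	return 0;		/* within the dead band */
}

int main(void)
{
	printf("%d\n", rate_compare(111, 100));	/* +11% -> 1 */
	printf("%d\n", rate_compare(105, 100));	/* +5%  -> 0 */
	printf("%d\n", rate_compare(80, 100));	/* -20% -> -1 */
	return 0;
}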
-
-static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
-			      struct mlx5e_rx_am *am)
-{
-	int prev_state = am->tune_state;
-	int prev_ix = am->profile_ix;
-	int stats_res;
-	int step_res;
-
-	switch (am->tune_state) {
-	case MLX5E_AM_PARKING_ON_TOP:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_SAME)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_PARKING_TIRED:
-		am->tired--;
-		if (!am->tired)
-			mlx5e_am_exit_parking(am);
-		break;
-
-	case MLX5E_AM_GOING_RIGHT:
-	case MLX5E_AM_GOING_LEFT:
-		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
-		if (stats_res != MLX5E_AM_STATS_BETTER)
-			mlx5e_am_turn(am);
-
-		if (mlx5e_am_on_top(am)) {
-			mlx5e_am_park_on_top(am);
-			break;
-		}
-
-		step_res = mlx5e_am_step(am);
-		switch (step_res) {
-		case MLX5E_AM_ON_EDGE:
-			mlx5e_am_park_on_top(am);
-			break;
-		case MLX5E_AM_TOO_TIRED:
-			mlx5e_am_park_tired(am);
-			break;
-		}
-
-		break;
-	}
-
-	if ((prev_state     != MLX5E_AM_PARKING_ON_TOP) ||
-	    (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
-		am->prev_stats = *curr_stats;
-
-	return am->profile_ix != prev_ix;
-}
-
-static void mlx5e_am_sample(struct mlx5e_rq *rq,
-			    struct mlx5e_rx_am_sample *s)
-{
-	s->time	     = ktime_get();
-	s->pkt_ctr   = rq->stats.packets;
-	s->event_ctr = rq->cq.event_ctr;
-}
-
-#define MLX5E_AM_NEVENTS 64
-
-static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
-				struct mlx5e_rx_am_sample *end,
-				struct mlx5e_rx_am_stats *curr_stats)
-{
-	/* u32 holds up to 71 minutes, should be enough */
-	u32 delta_us = ktime_us_delta(end->time, start->time);
-	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
-
-	if (!delta_us) {
-		WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
-		return;
-	}
-
-	curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
-	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
-}
-
-void mlx5e_rx_am_work(struct work_struct *work)
-{
-	struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
-					      work);
-	struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
-	struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
-
-	mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
-				       cur_profile.usec, cur_profile.pkts);
-
-	am->state = MLX5E_AM_START_MEASURE;
-}
-
-void mlx5e_rx_am(struct mlx5e_rq *rq)
-{
-	struct mlx5e_rx_am *am = &rq->am;
-	struct mlx5e_rx_am_sample end_sample;
-	struct mlx5e_rx_am_stats curr_stats;
-	u16 nevents;
-
-	switch (am->state) {
-	case MLX5E_AM_MEASURE_IN_PROGRESS:
-		nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
-		if (nevents < MLX5E_AM_NEVENTS)
-			break;
-		mlx5e_am_sample(rq, &end_sample);
-		mlx5e_am_calc_stats(&am->start_sample, &end_sample,
-				    &curr_stats);
-		if (mlx5e_am_decision(&curr_stats, am)) {
-			am->state = MLX5E_AM_APPLY_NEW_PROFILE;
-			schedule_work(&am->work);
-			break;
-		}
-		/* fall through */
-	case MLX5E_AM_START_MEASURE:
-		mlx5e_am_sample(rq, &am->start_sample);
-		am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
-		break;
-	case MLX5E_AM_APPLY_NEW_PROFILE:
-		break;
-	}
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
deleted file mode 100644
index 65442c3..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ip.h>
-#include <linux/udp.h>
-#include <net/udp.h>
-#include "en.h"
-
-enum {
-	MLX5E_ST_LINK_STATE,
-	MLX5E_ST_LINK_SPEED,
-	MLX5E_ST_HEALTH_INFO,
-#ifdef CONFIG_INET
-	MLX5E_ST_LOOPBACK,
-#endif
-	MLX5E_ST_NUM,
-};
-
-const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
-	"Link Test",
-	"Speed Test",
-	"Health Test",
-#ifdef CONFIG_INET
-	"Loopback Test",
-#endif
-};
-
-int mlx5e_self_test_num(struct mlx5e_priv *priv)
-{
-	return ARRAY_SIZE(mlx5e_self_tests);
-}
-
-static int mlx5e_test_health_info(struct mlx5e_priv *priv)
-{
-	struct mlx5_core_health *health = &priv->mdev->priv.health;
-
-	return health->sick ? 1 : 0;
-}
-
-static int mlx5e_test_link_state(struct mlx5e_priv *priv)
-{
-	u8 port_state;
-
-	if (!netif_carrier_ok(priv->netdev))
-		return 1;
-
-	port_state = mlx5_query_vport_state(priv->mdev, MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
-	return port_state == VPORT_STATE_UP ? 0 : 1;
-}
-
-static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	u32 eth_proto_oper;
-	int i;
-
-	if (!netif_carrier_ok(priv->netdev))
-		return 1;
-
-	if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
-		return 1;
-
-	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
-		if (eth_proto_oper & MLX5E_PROT_MASK(i))
-			return 0;
-	}
-	return 1;
-}
-
-#ifdef CONFIG_INET
-/* loopback test */
-#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN)
-static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
-#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
-
-struct mlx5ehdr {
-	__be32 version;
-	__be64 magic;
-	char   text[ETH_GSTRING_LEN];
-};
-
-static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
-{
-	struct sk_buff *skb = NULL;
-	struct mlx5ehdr *mlxh;
-	struct ethhdr *ethh;
-	struct udphdr *udph;
-	struct iphdr *iph;
-	int datalen, iplen;
-
-	datalen = MLX5E_TEST_PKT_SIZE -
-		  (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
-
-	skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
-	if (!skb) {
-		netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n");
-		return NULL;
-	}
-
-	prefetchw(skb->data);
-	skb_reserve(skb, NET_IP_ALIGN);
-
-	/*  Reserve for ethernet and IP header  */
-	ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
-	skb_reset_mac_header(skb);
-
-	skb_set_network_header(skb, skb->len);
-	iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
-
-	skb_set_transport_header(skb, skb->len);
-	udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
-
-	/* Fill ETH header */
-	ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
-	eth_zero_addr(ethh->h_source);
-	ethh->h_proto = htons(ETH_P_IP);
-
-	/* Fill UDP header */
-	udph->source = htons(9);
-	udph->dest = htons(9); /* Discard Protocol */
-	udph->len = htons(datalen + sizeof(struct udphdr));
-	udph->check = 0;
-
-	/* Fill IP header */
-	iph->ihl = 5;
-	iph->ttl = 32;
-	iph->version = 4;
-	iph->protocol = IPPROTO_UDP;
-	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
-	iph->tot_len = htons(iplen);
-	iph->frag_off = 0;
-	iph->saddr = 0;
-	iph->daddr = 0;
-	iph->tos = 0;
-	iph->id = 0;
-	ip_send_check(iph);
-
-	/* Fill test header and data */
-	mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh));
-	mlxh->version = 0;
-	mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
-	strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
-	datalen -= sizeof(*mlxh);
-	memset(skb_put(skb, datalen), 0, datalen);
-
-	skb->csum = 0;
-	skb->ip_summed = CHECKSUM_PARTIAL;
-	udp4_hwcsum(skb, iph->saddr, iph->daddr);
-
-	skb->protocol = htons(ETH_P_IP);
-	skb->pkt_type = PACKET_HOST;
-	skb->dev = priv->netdev;
-
-	return skb;
-}
-
-struct mlx5e_lbt_priv {
-	struct packet_type pt;
-	struct completion comp;
-	bool loopback_ok;
-};
-
-static int
-mlx5e_test_loopback_validate(struct sk_buff *skb,
-			     struct net_device *ndev,
-			     struct packet_type *pt,
-			     struct net_device *orig_ndev)
-{
-	struct mlx5e_lbt_priv *lbtp = pt->af_packet_priv;
-	struct mlx5ehdr *mlxh;
-	struct ethhdr *ethh;
-	struct udphdr *udph;
-	struct iphdr *iph;
-
-	/* We are only going to peek, no need to clone the SKB */
-	if (skb->protocol != htons(ETH_P_IP))
-		goto out;
-
-	if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
-		goto out;
-
-	ethh = (struct ethhdr *)skb_mac_header(skb);
-	if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr))
-		goto out;
-
-	iph = ip_hdr(skb);
-	if (iph->protocol != IPPROTO_UDP)
-		goto out;
-
-	udph = udp_hdr(skb);
-	if (udph->dest != htons(9))
-		goto out;
-
-	mlxh = (struct mlx5ehdr *)((char *)udph + sizeof(*udph));
-	if (mlxh->magic != cpu_to_be64(MLX5E_TEST_MAGIC))
-		goto out; /* so close! */
-
-	/* bingo */
-	lbtp->loopback_ok = true;
-	complete(&lbtp->comp);
-out:
-	kfree_skb(skb);
-	return 0;
-}
-
-static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
-				     struct mlx5e_lbt_priv *lbtp)
-{
-	int err = 0;
-
-	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
-	if (err) {
-		netdev_err(priv->netdev,
-			   "\tFailed to enable UC loopback err(%d)\n", err);
-		return err;
-	}
-
-	lbtp->loopback_ok = false;
-	init_completion(&lbtp->comp);
-
-	lbtp->pt.type = htons(ETH_P_ALL);
-	lbtp->pt.func = mlx5e_test_loopback_validate;
-	lbtp->pt.dev = priv->netdev;
-	lbtp->pt.af_packet_priv = lbtp;
-	dev_add_pack(&lbtp->pt);
-	return err;
-}
-
-static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
-					struct mlx5e_lbt_priv *lbtp)
-{
-	dev_remove_pack(&lbtp->pt);
-	mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
-}
-
-#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
-static int mlx5e_test_loopback(struct mlx5e_priv *priv)
-{
-	struct mlx5e_lbt_priv *lbtp;
-	struct sk_buff *skb = NULL;
-	int err;
-
-	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
-		netdev_err(priv->netdev,
-			   "\tCan't perform loopback test while device is down\n");
-		return -ENODEV;
-	}
-
-	lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL);
-	if (!lbtp)
-		return -ENOMEM;
-	lbtp->loopback_ok = false;
-
-	err = mlx5e_test_loopback_setup(priv, lbtp);
-	if (err)
-		goto out;
-
-	skb = mlx5e_test_get_udp_skb(priv);
-	if (!skb) {
-		err = -ENOMEM;
-		goto cleanup;
-	}
-
-	skb_set_queue_mapping(skb, 0);
-	err = dev_queue_xmit(skb);
-	if (err) {
-		netdev_err(priv->netdev,
-			   "\tFailed to xmit loopback packet err(%d)\n",
-			   err);
-		goto cleanup;
-	}
-
-	wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT);
-	err = !lbtp->loopback_ok;
-
-cleanup:
-	mlx5e_test_loopback_cleanup(priv, lbtp);
-out:
-	kfree(lbtp);
-	return err;
-}
-#endif
-
-static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
-	mlx5e_test_link_state,
-	mlx5e_test_link_speed,
-	mlx5e_test_health_info,
-#ifdef CONFIG_INET
-	mlx5e_test_loopback,
-#endif
-};
-
-void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
-		     u64 *buf)
-{
-	struct mlx5e_priv *priv = netdev_priv(ndev);
-	int i;
-
-	memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
-
-	mutex_lock(&priv->state_lock);
-	netdev_info(ndev, "Self test begin..\n");
-
-	for (i = 0; i < MLX5E_ST_NUM; i++) {
-		netdev_info(ndev, "\t[%d] %s start..\n",
-			    i, mlx5e_self_tests[i]);
-		buf[i] = mlx5e_st_func[i](priv);
-		netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
-			    i, mlx5e_self_tests[i], buf[i]);
-	}
-
-	mutex_unlock(&priv->state_lock);
-
-	for (i = 0; i < MLX5E_ST_NUM; i++) {
-		if (buf[i]) {
-			etest->flags |= ETH_TEST_FL_FAILED;
-			break;
-		}
-	}
-	netdev_info(ndev, "Self test out: status flags(0x%x)\n",
-		    etest->flags);
-}
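
The loopback test above is a send-and-verify pattern: enable self-loopback on
the TIRs, hook a temporary packet_type tap, transmit one magic-tagged UDP
frame, and wait on a completion for at most MLX5E_LB_VERIFY_TIMEOUT (200 ms).
A minimal user-space analogue of the same pattern, using an ordinary UDP
socket instead of a packet tap; the port number and payload are made up for
the example:

    /* User-space analogue of mlx5e_test_loopback(): send a magic-tagged UDP
     * datagram to ourselves and require it back before a 200 ms timeout.
     * The port (15001) and magic string are illustrative only. */
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/time.h>
    #include <unistd.h>

    #define LB_MAGIC "MLX5E SELF TEST"

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_DGRAM, 0);
            struct sockaddr_in addr = {
                    .sin_family = AF_INET,
                    .sin_port = htons(15001),
                    .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
            };
            /* 200 ms, the same budget as MLX5E_LB_VERIFY_TIMEOUT above */
            struct timeval tmo = { .tv_sec = 0, .tv_usec = 200 * 1000 };
            char buf[64] = { 0 };

            setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tmo, sizeof(tmo));
            bind(fd, (struct sockaddr *)&addr, sizeof(addr));
            sendto(fd, LB_MAGIC, sizeof(LB_MAGIC), 0,
                   (struct sockaddr *)&addr, sizeof(addr));

            /* Pass only if the magic payload arrives before the timeout. */
            if (recv(fd, buf, sizeof(buf), 0) > 0 && !strcmp(buf, LB_MAGIC))
                    puts("loopback ok");
            else
                    puts("loopback FAILED");
            close(fd);
            return 0;
    }
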
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
deleted file mode 100644
index f202f87..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ /dev/null
@@ -1,430 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __MLX5_EN_STATS_H__
-#define __MLX5_EN_STATS_H__
-
-#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
-	(*(u64 *)((char *)ptr + dsc[i].offset))
-#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
-	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
-#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
-	(*(u32 *)((char *)ptr + dsc[i].offset))
-#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
-
-#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
-#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
-#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
-
-struct counter_desc {
-	char		format[ETH_GSTRING_LEN];
-	int		offset; /* Byte offset */
-};
-
-struct mlx5e_sw_stats {
-	u64 rx_packets;
-	u64 rx_bytes;
-	u64 tx_packets;
-	u64 tx_bytes;
-	u64 tx_tso_packets;
-	u64 tx_tso_bytes;
-	u64 tx_tso_inner_packets;
-	u64 tx_tso_inner_bytes;
-	u64 rx_lro_packets;
-	u64 rx_lro_bytes;
-	u64 rx_csum_unnecessary;
-	u64 rx_csum_none;
-	u64 rx_csum_complete;
-	u64 rx_csum_unnecessary_inner;
-	u64 rx_xdp_drop;
-	u64 rx_xdp_tx;
-	u64 rx_xdp_tx_full;
-	u64 tx_csum_partial;
-	u64 tx_csum_partial_inner;
-	u64 tx_queue_stopped;
-	u64 tx_queue_wake;
-	u64 tx_queue_dropped;
-	u64 tx_xmit_more;
-	u64 rx_wqe_err;
-	u64 rx_mpwqe_filler;
-	u64 rx_buff_alloc_err;
-	u64 rx_cqe_compress_blks;
-	u64 rx_cqe_compress_pkts;
-	u64 rx_cache_reuse;
-	u64 rx_cache_full;
-	u64 rx_cache_empty;
-	u64 rx_cache_busy;
-
-	/* Special handling counters */
-	u64 link_down_events_phy;
-};
-
-static const struct counter_desc sw_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
-};
-
-struct mlx5e_qcounter_stats {
-	u32 rx_out_of_buffer;
-};
-
-static const struct counter_desc q_stats_desc[] = {
-	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
-};
-
-#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
-#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
-						vstats->query_vport_out, c)
-
-struct mlx5e_vport_stats {
-	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
-};
-
-static const struct counter_desc vport_stats_desc[] = {
-	{ "rx_vport_unicast_packets",
-		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
-	{ "rx_vport_unicast_bytes",
-		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
-	{ "tx_vport_unicast_packets",
-		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
-	{ "tx_vport_unicast_bytes",
-		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
-	{ "rx_vport_multicast_packets",
-		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
-	{ "rx_vport_multicast_bytes",
-		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
-	{ "tx_vport_multicast_packets",
-		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
-	{ "tx_vport_multicast_bytes",
-		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
-	{ "rx_vport_broadcast_packets",
-		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
-	{ "rx_vport_broadcast_bytes",
-		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
-	{ "tx_vport_broadcast_packets",
-		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
-	{ "tx_vport_broadcast_bytes",
-		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
-	{ "rx_vport_rdma_unicast_packets",
-		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
-	{ "rx_vport_rdma_unicast_bytes",
-		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
-	{ "tx_vport_rdma_unicast_packets",
-		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
-	{ "tx_vport_rdma_unicast_bytes",
-		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
-	{ "rx_vport_rdma_multicast_packets",
-		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
-	{ "rx_vport_rdma_multicast_bytes",
-		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
-	{ "tx_vport_rdma_multicast_packets",
-		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
-	{ "tx_vport_rdma_multicast_bytes",
-		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
-};
-
-#define PPORT_802_3_OFF(c) \
-	MLX5_BYTE_OFF(ppcnt_reg, \
-		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_802_3_GET(pstats, c) \
-	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
-		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_OFF(c) \
-	MLX5_BYTE_OFF(ppcnt_reg, \
-		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_GET(pstats, c) \
-	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
-		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_OFF(c) \
-	MLX5_BYTE_OFF(ppcnt_reg, \
-		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_GET(pstats, c) \
-	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
-		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_PER_PRIO_OFF(c) \
-	MLX5_BYTE_OFF(ppcnt_reg, \
-		      counter_set.eth_per_prio_grp_data_layout.c##_high)
-#define PPORT_PER_PRIO_GET(pstats, prio, c) \
-	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
-		   counter_set.eth_per_prio_grp_data_layout.c##_high)
-#define NUM_PPORT_PRIO				8
-
-struct mlx5e_pport_stats {
-	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
-	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
-	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
-	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
-	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
-};
-
-static const struct counter_desc pport_802_3_stats_desc[] = {
-	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
-	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
-	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
-	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
-	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
-	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
-	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
-	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
-	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
-	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
-	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
-	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
-	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
-	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
-	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
-	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
-	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
-	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
-};
-
-static const struct counter_desc pport_2863_stats_desc[] = {
-	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
-	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
-	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
-};
-
-static const struct counter_desc pport_2819_stats_desc[] = {
-	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
-	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
-	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
-	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
-	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
-	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
-	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
-	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
-	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
-	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
-	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
-	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
-	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
-};
-
-static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
-	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
-	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
-	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
-	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
-};
-
-static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
-	/* %s is "global" or "prio{i}" */
-	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
-	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
-	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
-	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
-	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
-};
-
-#define PCIE_PERF_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
-		 counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
-		 counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
-	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-	__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
-	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
-	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
-	{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
-	{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
-struct mlx5e_rq_stats {
-	u64 packets;
-	u64 bytes;
-	u64 csum_complete;
-	u64 csum_unnecessary_inner;
-	u64 csum_none;
-	u64 lro_packets;
-	u64 lro_bytes;
-	u64 xdp_drop;
-	u64 xdp_tx;
-	u64 xdp_tx_full;
-	u64 wqe_err;
-	u64 mpwqe_filler;
-	u64 buff_alloc_err;
-	u64 cqe_compress_blks;
-	u64 cqe_compress_pkts;
-	u64 cache_reuse;
-	u64 cache_full;
-	u64 cache_empty;
-	u64 cache_busy;
-};
-
-static const struct counter_desc rq_stats_desc[] = {
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
-	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
-};
-
-struct mlx5e_sq_stats {
-	/* commonly accessed in data path */
-	u64 packets;
-	u64 bytes;
-	u64 xmit_more;
-	u64 tso_packets;
-	u64 tso_bytes;
-	u64 tso_inner_packets;
-	u64 tso_inner_bytes;
-	u64 csum_partial_inner;
-	u64 nop;
-	/* less likely accessed in data path */
-	u64 csum_none;
-	u64 stopped;
-	u64 wake;
-	u64 dropped;
-};
-
-static const struct counter_desc sq_stats_desc[] = {
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
-};
-
-#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
-#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
-#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
-#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
-#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
-#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS		ARRAY_SIZE(pcie_tas_stats_desc)
-#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
-	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
-	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_COUNTERS		(NUM_PPORT_802_3_COUNTERS + \
-					 NUM_PPORT_2863_COUNTERS  + \
-					 NUM_PPORT_2819_COUNTERS  + \
-					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
-					 NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS		(NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
-#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
-#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
-
-struct mlx5e_stats {
-	struct mlx5e_sw_stats sw;
-	struct mlx5e_qcounter_stats qcnt;
-	struct mlx5e_vport_stats vport;
-	struct mlx5e_pport_stats pport;
-	struct mlx5e_pcie_stats pcie;
-	struct rtnl_link_stats64 vf_vport;
-};
-
-static const struct counter_desc mlx5e_pme_status_desc[] = {
-	{ "module_plug", 0 },
-	{ "module_unplug", 8 },
-};
-
-static const struct counter_desc mlx5e_pme_error_desc[] = {
-	{ "module_pwr_budget_exd", 0 },  /* power budget exceeded */
-	{ "module_long_range", 8 },      /* long range for non-MLNX cable */
-	{ "module_bus_stuck", 16 },      /* bus stuck (I2C or data shorted) */
-	{ "module_no_eeprom", 24 },      /* no eeprom/retry time out */
-	{ "module_enforce_part", 32 },   /* enforce part number list */
-	{ "module_unknown_id", 40 },     /* unknown identifier */
-	{ "module_high_temp", 48 },      /* high temperature */
-	{ "module_bad_shorted", 56 },    /* bad or shorted cable/module */
-	{ "module_unknown_status", 64 },
-};
-
-#endif /* __MLX5_EN_STATS_H__ */
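
Everything in the header above follows one pattern: a statistics group is a
plain struct plus an array of { name, byte offset } descriptors, so the
ethtool string table and the value table can both be produced by a single
generic loop. A self-contained sketch of that pattern with illustrative
struct and field names:

    /* Self-contained sketch of the { name, offset } descriptor pattern used
     * throughout this header. Struct and field names are illustrative. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct sw_stats {
            uint64_t rx_packets;
            uint64_t rx_bytes;
            uint64_t tx_packets;
    };

    struct counter_desc {
            const char *format;
            int offset; /* byte offset into the stats struct */
    };

    #define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }
    #define READ_CTR64(ptr, dsc, i) \
            (*(uint64_t *)((char *)(ptr) + (dsc)[i].offset))

    static const struct counter_desc sw_desc[] = {
            DECLARE_STAT(struct sw_stats, rx_packets),
            DECLARE_STAT(struct sw_stats, rx_bytes),
            DECLARE_STAT(struct sw_stats, tx_packets),
    };

    int main(void)
    {
            struct sw_stats s = { .rx_packets = 7, .rx_bytes = 4242,
                                  .tx_packets = 3 };
            size_t i;

            /* One generic loop emits every name/value pair, the same shape
             * an ethtool -S handler takes with these descriptors. */
            for (i = 0; i < sizeof(sw_desc) / sizeof(sw_desc[0]); i++)
                    printf("%-12s %llu\n", sw_desc[i].format,
                           (unsigned long long)READ_CTR64(&s, sw_desc, i));
            return 0;
    }

The _BE reader variants exist because the firmware-owned groups (vport,
pport, pcie) are raw big-endian command output: the same offset trick
applies, but each load goes through be64_to_cpu().
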
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
deleted file mode 100644
index f8829b5..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ /dev/null
@@ -1,1094 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <net/flow_dissector.h>
-#include <net/sch_generic.h>
-#include <net/pkt_cls.h>
-#include <net/tc_act/tc_gact.h>
-#include <net/tc_act/tc_skbedit.h>
-#include <linux/mlx5/fs.h>
-#include <linux/mlx5/device.h>
-#include <linux/rhashtable.h>
-#include <net/switchdev.h>
-#include <net/tc_act/tc_mirred.h>
-#include <net/tc_act/tc_vlan.h>
-#include <net/tc_act/tc_tunnel_key.h>
-#include <net/vxlan.h>
-#include "en.h"
-#include "en_tc.h"
-#include "eswitch.h"
-#include "vxlan.h"
-
-struct mlx5e_tc_flow {
-	struct rhash_head	node;
-	u64			cookie;
-	struct mlx5_flow_handle *rule;
-	struct list_head	encap; /* flows sharing the same encap */
-	struct mlx5_esw_flow_attr *attr;
-};
-
-enum {
-	MLX5_HEADER_TYPE_VXLAN = 0x0,
-	MLX5_HEADER_TYPE_NVGRE = 0x1,
-};
-
-#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_TABLE_NUM_GROUPS 4
-
-static struct mlx5_flow_handle *
-mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
-		      struct mlx5_flow_spec *spec,
-		      u32 action, u32 flow_tag)
-{
-	struct mlx5_core_dev *dev = priv->mdev;
-	struct mlx5_flow_destination dest = { 0 };
-	struct mlx5_flow_act flow_act = {
-		.action = action,
-		.flow_tag = flow_tag,
-		.encap_id = 0,
-	};
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_handle *rule;
-	bool table_created = false;
-
-	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-		dest.ft = priv->fs.vlan.ft.t;
-	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		counter = mlx5_fc_create(dev, true);
-		if (IS_ERR(counter))
-			return ERR_CAST(counter);
-
-		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest.counter = counter;
-	}
-
-	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
-		priv->fs.tc.t =
-			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
-							    MLX5E_TC_PRIO,
-							    MLX5E_TC_TABLE_NUM_ENTRIES,
-							    MLX5E_TC_TABLE_NUM_GROUPS,
-							    0, 0);
-		if (IS_ERR(priv->fs.tc.t)) {
-			netdev_err(priv->netdev,
-				   "Failed to create tc offload table\n");
-			rule = ERR_CAST(priv->fs.tc.t);
-			goto err_create_ft;
-		}
-
-		table_created = true;
-	}
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
-
-	if (IS_ERR(rule))
-		goto err_add_rule;
-
-	return rule;
-
-err_add_rule:
-	if (table_created) {
-		mlx5_destroy_flow_table(priv->fs.tc.t);
-		priv->fs.tc.t = NULL;
-	}
-err_create_ft:
-	mlx5_fc_destroy(dev, counter);
-
-	return rule;
-}
-
-static struct mlx5_flow_handle *
-mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
-		      struct mlx5_flow_spec *spec,
-		      struct mlx5_esw_flow_attr *attr)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	int err;
-
-	err = mlx5_eswitch_add_vlan_action(esw, attr);
-	if (err)
-		return ERR_PTR(err);
-
-	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-}
-
-static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-			       struct mlx5e_tc_flow *flow)
-{
-	struct list_head *next = flow->encap.next;
-
-	list_del(&flow->encap);
-	if (list_empty(next)) {
-		struct mlx5_encap_entry *e;
-
-		e = list_entry(next, struct mlx5_encap_entry, flows);
-		if (e->n) {
-			mlx5_encap_dealloc(priv->mdev, e->encap_id);
-			neigh_release(e->n);
-		}
-		hlist_del_rcu(&e->encap_hlist);
-		kfree(e);
-	}
-}
-
-static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
-			      struct mlx5e_tc_flow *flow)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct mlx5_fc *counter = NULL;
-
-	counter = mlx5_flow_rule_counter(flow->rule);
-
-	mlx5_del_flow_rules(flow->rule);
-
-	if (esw && esw->mode == SRIOV_OFFLOADS) {
-		mlx5_eswitch_del_vlan_action(esw, flow->attr);
-		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
-			mlx5e_detach_encap(priv, flow);
-	}
-
-	mlx5_fc_destroy(priv->mdev, counter);
-
-	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
-		mlx5_destroy_flow_table(priv->fs.tc.t);
-		priv->fs.tc.t = NULL;
-	}
-}
-
-static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
-			     struct tc_cls_flower_offload *f)
-{
-	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				       outer_headers);
-	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				       outer_headers);
-	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				    misc_parameters);
-	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				    misc_parameters);
-
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
-		struct flow_dissector_key_keyid *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_KEYID,
-						  f->key);
-		struct flow_dissector_key_keyid *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_KEYID,
-						  f->mask);
-		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
-			 be32_to_cpu(mask->keyid));
-		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
-			 be32_to_cpu(key->keyid));
-	}
-}
-
-static int parse_tunnel_attr(struct mlx5e_priv *priv,
-			     struct mlx5_flow_spec *spec,
-			     struct tc_cls_flower_offload *f)
-{
-	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				       outer_headers);
-	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				       outer_headers);
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
-		struct flow_dissector_key_ports *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_PORTS,
-						  f->key);
-		struct flow_dissector_key_ports *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_PORTS,
-						  f->mask);
-
-		/* Full udp dst port must be given */
-		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
-			return -EOPNOTSUPP;
-
-		/* udp src port isn't supported */
-		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
-			return -EOPNOTSUPP;
-
-		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
-		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
-			parse_vxlan_attr(spec, f);
-		else
-			return -EOPNOTSUPP;
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-			 udp_dport, ntohs(mask->dst));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-			 udp_dport, ntohs(key->dst));
-
-	} else { /* udp dst port must be given */
-		return -EOPNOTSUPP;
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
-						  f->mask);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
-			 ntohl(mask->src));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
-			 ntohl(key->src));
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-			 ntohl(mask->dst));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-			 ntohl(key->dst));
-	}
-
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
-
-	/* Enforce DMAC when offloading incoming tunneled flows.
-	 * Flow counters require a match on the DMAC.
-	 */
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
-	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
-	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				     dmac_47_16), priv->netdev->dev_addr);
-
-	/* let software handle IP fragments */
-	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
-
-	return 0;
-}
-
-static int __parse_cls_flower(struct mlx5e_priv *priv,
-			      struct mlx5_flow_spec *spec,
-			      struct tc_cls_flower_offload *f,
-			      u8 *min_inline)
-{
-	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-				       outer_headers);
-	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-				       outer_headers);
-	u16 addr_type = 0;
-	u8 ip_proto = 0;
-
-	*min_inline = MLX5_INLINE_MODE_L2;
-
-	if (f->dissector->used_keys &
-	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
-	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
-	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
-	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
-	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
-		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
-			    f->dissector->used_keys);
-		return -EOPNOTSUPP;
-	}
-
-	if ((dissector_uses_key(f->dissector,
-				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
-	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
-	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
-	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
-						  f->key);
-		switch (key->addr_type) {
-		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-			if (parse_tunnel_attr(priv, spec, f))
-				return -EOPNOTSUPP;
-			break;
-		default:
-			return -EOPNOTSUPP;
-		}
-
-		/* In decap flow, header pointers should point to the inner
-		 * headers, outer header were already set by parse_tunnel_attr
-		 */
-		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-					 inner_headers);
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
-		struct flow_dissector_key_control *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->key);
-
-		struct flow_dissector_key_control *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_CONTROL,
-						  f->mask);
-		addr_type = key->addr_type;
-
-		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
-				 key->flags & FLOW_DIS_IS_FRAGMENT);
-		}
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-		struct flow_dissector_key_basic *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->key);
-		struct flow_dissector_key_basic *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_BASIC,
-						  f->mask);
-		ip_proto = key->ip_proto;
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-			 ntohs(mask->n_proto));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-			 ntohs(key->n_proto));
-
-		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
-			 mask->ip_proto);
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
-			 key->ip_proto);
-
-		if (mask->ip_proto)
-			*min_inline = MLX5_INLINE_MODE_IP;
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-		struct flow_dissector_key_eth_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						  f->key);
-		struct flow_dissector_key_eth_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
-						  f->mask);
-
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     dmac_47_16),
-				mask->dst);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     dmac_47_16),
-				key->dst);
-
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-					     smac_47_16),
-				mask->src);
-		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-					     smac_47_16),
-				key->src);
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
-		struct flow_dissector_key_vlan *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->key);
-		struct flow_dissector_key_vlan *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_VLAN,
-						  f->mask);
-		if (mask->vlan_id || mask->vlan_priority) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
-
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
-
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
-		}
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
-		struct flow_dissector_key_ipv4_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv4_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
-						  f->mask);
-
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       &mask->src, sizeof(mask->src));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
-		       &key->src, sizeof(key->src));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       &mask->dst, sizeof(mask->dst));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-		       &key->dst, sizeof(key->dst));
-
-		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_IP;
-	}
-
-	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
-		struct flow_dissector_key_ipv6_addrs *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->key);
-		struct flow_dissector_key_ipv6_addrs *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
-						  f->mask);
-
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       &mask->src, sizeof(mask->src));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
-		       &key->src, sizeof(key->src));
-
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &mask->dst, sizeof(mask->dst));
-		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &key->dst, sizeof(key->dst));
-
-		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
-		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
-			*min_inline = MLX5_INLINE_MODE_IP;
-	}
-
-	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
-		struct flow_dissector_key_ports *key =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_PORTS,
-						  f->key);
-		struct flow_dissector_key_ports *mask =
-			skb_flow_dissector_target(f->dissector,
-						  FLOW_DISSECTOR_KEY_PORTS,
-						  f->mask);
-		switch (ip_proto) {
-		case IPPROTO_TCP:
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-				 tcp_sport, ntohs(mask->src));
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-				 tcp_sport, ntohs(key->src));
-
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-				 tcp_dport, ntohs(mask->dst));
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-				 tcp_dport, ntohs(key->dst));
-			break;
-
-		case IPPROTO_UDP:
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-				 udp_sport, ntohs(mask->src));
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-				 udp_sport, ntohs(key->src));
-
-			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
-				 udp_dport, ntohs(mask->dst));
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
-				 udp_dport, ntohs(key->dst));
-			break;
-		default:
-			netdev_err(priv->netdev,
-				   "Only UDP and TCP transport are supported\n");
-			return -EINVAL;
-		}
-
-		if (mask->src || mask->dst)
-			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
-	}
-
-	return 0;
-}
-
-static int parse_cls_flower(struct mlx5e_priv *priv,
-			    struct mlx5_flow_spec *spec,
-			    struct tc_cls_flower_offload *f)
-{
-	struct mlx5_core_dev *dev = priv->mdev;
-	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	struct mlx5_eswitch_rep *rep = priv->ppriv;
-	u8 min_inline;
-	int err;
-
-	err = __parse_cls_flower(priv, spec, f, &min_inline);
-
-	if (!err && esw->mode == SRIOV_OFFLOADS &&
-	    rep->vport != FDB_UPLINK_VPORT) {
-		if (min_inline > esw->offloads.inline_mode) {
-			netdev_warn(priv->netdev,
-				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
-				    min_inline, esw->offloads.inline_mode);
-			return -EOPNOTSUPP;
-		}
-	}
-
-	return err;
-}
-
-static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
-				u32 *action, u32 *flow_tag)
-{
-	const struct tc_action *a;
-	LIST_HEAD(actions);
-
-	if (tc_no_actions(exts))
-		return -EINVAL;
-
-	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
-	*action = 0;
-
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
-		/* Only support a single action per rule */
-		if (*action)
-			return -EINVAL;
-
-		if (is_tcf_gact_shot(a)) {
-			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
-			if (MLX5_CAP_FLOWTABLE(priv->mdev,
-					       flow_table_properties_nic_receive.flow_counter))
-				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
-			continue;
-		}
-
-		if (is_tcf_skbedit_mark(a)) {
-			u32 mark = tcf_skbedit_mark(a);
-
-			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
-				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
-					    mark);
-				return -EINVAL;
-			}
-
-			*flow_tag = mark;
-			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-			continue;
-		}
-
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static inline int cmp_encap_info(struct mlx5_encap_info *a,
-				 struct mlx5_encap_info *b)
-{
-	return memcmp(a, b, sizeof(*a));
-}
-
-static inline int hash_encap_info(struct mlx5_encap_info *info)
-{
-	return jhash(info, sizeof(*info), 0);
-}
-
-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
-				   struct net_device *mirred_dev,
-				   struct net_device **out_dev,
-				   struct flowi4 *fl4,
-				   struct neighbour **out_n,
-				   __be32 *saddr,
-				   int *out_ttl)
-{
-	struct rtable *rt;
-	struct neighbour *n = NULL;
-	int ttl;
-
-#if IS_ENABLED(CONFIG_INET)
-	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
-	if (IS_ERR(rt)) {
-		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
-		return -EOPNOTSUPP;
-	}
-#else
-	return -EOPNOTSUPP;
-#endif
-
-	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
-		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
-			__func__);
-		ip_rt_put(rt);
-		return -EOPNOTSUPP;
-	}
-
-	ttl = ip4_dst_hoplimit(&rt->dst);
-	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
-	*out_dev = rt->dst.dev; /* take the device before dropping the route ref */
-	ip_rt_put(rt);
-	if (!n)
-		return -ENOMEM;
-
-	*out_n = n;
-	*saddr = fl4->saddr;
-	*out_ttl = ttl;
-
-	return 0;
-}
-
-static int gen_vxlan_header_ipv4(struct net_device *out_dev,
-				 char buf[],
-				 unsigned char h_dest[ETH_ALEN],
-				 int ttl,
-				 __be32 daddr,
-				 __be32 saddr,
-				 __be16 udp_dst_port,
-				 __be32 vx_vni)
-{
-	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
-	struct ethhdr *eth = (struct ethhdr *)buf;
-	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
-	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
-	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
-
-	memset(buf, 0, encap_size);
-
-	ether_addr_copy(eth->h_dest, h_dest);
-	ether_addr_copy(eth->h_source, out_dev->dev_addr);
-	eth->h_proto = htons(ETH_P_IP);
-
-	ip->daddr = daddr;
-	ip->saddr = saddr;
-
-	ip->ttl = ttl;
-	ip->protocol = IPPROTO_UDP;
-	ip->version = 0x4;
-	ip->ihl = 0x5;
-
-	udp->dest = udp_dst_port;
-	vxh->vx_flags = VXLAN_HF_VNI;
-	vxh->vx_vni = vxlan_vni_field(vx_vni);
-
-	return encap_size;
-}
-
-static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
-					  struct net_device *mirred_dev,
-					  struct mlx5_encap_entry *e,
-					  struct net_device **out_dev)
-{
-	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
-	struct flowi4 fl4 = {};
-	struct neighbour *n;
-	char *encap_header;
-	int encap_size;
-	__be32 saddr;
-	int ttl;
-	int err;
-
-	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
-	if (!encap_header)
-		return -ENOMEM;
-
-	switch (e->tunnel_type) {
-	case MLX5_HEADER_TYPE_VXLAN:
-		fl4.flowi4_proto = IPPROTO_UDP;
-		fl4.fl4_dport = e->tun_info.tp_dst;
-		break;
-	default:
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-	fl4.daddr = e->tun_info.daddr;
-
-	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
-				      &fl4, &n, &saddr, &ttl);
-	if (err)
-		goto out;
-
-	e->n = n;
-	e->out_dev = *out_dev;
-
-	if (!(n->nud_state & NUD_VALID)) {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	neigh_ha_snapshot(e->h_dest, n, *out_dev);
-
-	switch (e->tunnel_type) {
-	case MLX5_HEADER_TYPE_VXLAN:
-		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
-						   e->h_dest, ttl,
-						   e->tun_info.daddr,
-						   saddr, e->tun_info.tp_dst,
-						   e->tun_info.tun_id);
-		break;
-	default:
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-			       encap_size, encap_header, &e->encap_id);
-out:
-	kfree(encap_header);
-	return err;
-}
-
-static int mlx5e_attach_encap(struct mlx5e_priv *priv,
-			      struct ip_tunnel_info *tun_info,
-			      struct net_device *mirred_dev,
-			      struct mlx5_esw_flow_attr *attr)
-{
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	unsigned short family = ip_tunnel_info_af(tun_info);
-	struct ip_tunnel_key *key = &tun_info->key;
-	struct mlx5_encap_info info;
-	struct mlx5_encap_entry *e;
-	struct net_device *out_dev;
-	uintptr_t hash_key;
-	bool found = false;
-	int tunnel_type;
-	int err;
-
-	/* udp dst port must be given */
-	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
-		return -EOPNOTSUPP;
-
-	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
-	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
-		info.tp_dst = key->tp_dst;
-		info.tun_id = tunnel_id_to_key32(key->tun_id);
-		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
-	} else {
-		return -EOPNOTSUPP;
-	}
-
-	switch (family) {
-	case AF_INET:
-		info.daddr = key->u.ipv4.dst;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
-	hash_key = hash_encap_info(&info);
-
-	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
-				   encap_hlist, hash_key) {
-		if (!cmp_encap_info(&e->tun_info, &info)) {
-			found = true;
-			break;
-		}
-	}
-
-	if (found) {
-		attr->encap = e;
-		return 0;
-	}
-
-	e = kzalloc(sizeof(*e), GFP_KERNEL);
-	if (!e)
-		return -ENOMEM;
-
-	e->tun_info = info;
-	e->tunnel_type = tunnel_type;
-	INIT_LIST_HEAD(&e->flows);
-
-	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
-	if (err)
-		goto out_err;
-
-	attr->encap = e;
-	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
-
-	return err;
-
-out_err:
-	kfree(e);
-	return err;
-}
-
-static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
-				struct mlx5e_tc_flow *flow)
-{
-	struct mlx5_esw_flow_attr *attr = flow->attr;
-	struct ip_tunnel_info *info = NULL;
-	const struct tc_action *a;
-	LIST_HEAD(actions);
-	bool encap = false;
-	int err;
-
-	if (tc_no_actions(exts))
-		return -EINVAL;
-
-	memset(attr, 0, sizeof(*attr));
-	attr->in_rep = priv->ppriv;
-
-	tcf_exts_to_list(exts, &actions);
-	list_for_each_entry(a, &actions, list) {
-		if (is_tcf_gact_shot(a)) {
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
-					MLX5_FLOW_CONTEXT_ACTION_COUNT;
-			continue;
-		}
-
-		if (is_tcf_mirred_egress_redirect(a)) {
-			int ifindex = tcf_mirred_ifindex(a);
-			struct net_device *out_dev;
-			struct mlx5e_priv *out_priv;
-
-			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
-
-			if (switchdev_port_same_parent_id(priv->netdev,
-							  out_dev)) {
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-					MLX5_FLOW_CONTEXT_ACTION_COUNT;
-				out_priv = netdev_priv(out_dev);
-				attr->out_rep = out_priv->ppriv;
-			} else if (encap) {
-				err = mlx5e_attach_encap(priv, info,
-							 out_dev, attr);
-				if (err)
-					return err;
-				list_add(&flow->encap, &attr->encap->flows);
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
-					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-					MLX5_FLOW_CONTEXT_ACTION_COUNT;
-				out_priv = netdev_priv(attr->encap->out_dev);
-				attr->out_rep = out_priv->ppriv;
-			} else {
-				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
-				       priv->netdev->name, out_dev->name);
-				return -EINVAL;
-			}
-			continue;
-		}
-
-		if (is_tcf_tunnel_set(a)) {
-			info = tcf_tunnel_info(a);
-			if (info)
-				encap = true;
-			else
-				return -EOPNOTSUPP;
-			continue;
-		}
-
-		if (is_tcf_vlan(a)) {
-			if (tcf_vlan_action(a) == VLAN_F_POP) {
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
-			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
-				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
-					return -EOPNOTSUPP;
-
-				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
-				attr->vlan = tcf_vlan_push_vid(a);
-			}
-			continue;
-		}
-
-		if (is_tcf_tunnel_release(a)) {
-			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
-			continue;
-		}
-
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
-			   struct tc_cls_flower_offload *f)
-{
-	struct mlx5e_tc_table *tc = &priv->fs.tc;
-	int err = 0;
-	bool fdb_flow = false;
-	u32 flow_tag, action;
-	struct mlx5e_tc_flow *flow;
-	struct mlx5_flow_spec *spec;
-	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
-	if (esw && esw->mode == SRIOV_OFFLOADS)
-		fdb_flow = true;
-
-	if (fdb_flow)
-		flow = kzalloc(sizeof(*flow) +
-			       sizeof(struct mlx5_esw_flow_attr),
-			       GFP_KERNEL);
-	else
-		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec || !flow) {
-		err = -ENOMEM;
-		goto err_free;
-	}
-
-	flow->cookie = f->cookie;
-
-	err = parse_cls_flower(priv, spec, f);
-	if (err < 0)
-		goto err_free;
-
-	if (fdb_flow) {
-		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
-		err = parse_tc_fdb_actions(priv, f->exts, flow);
-		if (err < 0)
-			goto err_free;
-		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
-	} else {
-		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
-		if (err < 0)
-			goto err_free;
-		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
-	}
-
-	if (IS_ERR(flow->rule)) {
-		err = PTR_ERR(flow->rule);
-		goto err_free;
-	}
-
-	err = rhashtable_insert_fast(&tc->ht, &flow->node,
-				     tc->ht_params);
-	if (err)
-		goto err_del_rule;
-
-	goto out;
-
-err_del_rule:
-	mlx5_del_flow_rules(flow->rule);
-
-err_free:
-	kfree(flow);
-out:
-	kvfree(spec);
-	return err;
-}
-
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
-			struct tc_cls_flower_offload *f)
-{
-	struct mlx5e_tc_flow *flow;
-	struct mlx5e_tc_table *tc = &priv->fs.tc;
-
-	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
-				      tc->ht_params);
-	if (!flow)
-		return -EINVAL;
-
-	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
-
-	mlx5e_tc_del_flow(priv, flow);
-
-	kfree(flow);
-
-	return 0;
-}
-
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
-		       struct tc_cls_flower_offload *f)
-{
-	struct mlx5e_tc_table *tc = &priv->fs.tc;
-	struct mlx5e_tc_flow *flow;
-	struct tc_action *a;
-	struct mlx5_fc *counter;
-	LIST_HEAD(actions);
-	u64 bytes;
-	u64 packets;
-	u64 lastuse;
-
-	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
-				      tc->ht_params);
-	if (!flow)
-		return -EINVAL;
-
-	counter = mlx5_flow_rule_counter(flow->rule);
-	if (!counter)
-		return 0;
-
-	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
-
-	tcf_exts_to_list(f->exts, &actions);
-	list_for_each_entry(a, &actions, list)
-		tcf_action_stats_update(a, bytes, packets, lastuse);
-
-	return 0;
-}
-
-static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
-	.head_offset = offsetof(struct mlx5e_tc_flow, node),
-	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
-	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
-	.automatic_shrinking = true,
-};
-
-int mlx5e_tc_init(struct mlx5e_priv *priv)
-{
-	struct mlx5e_tc_table *tc = &priv->fs.tc;
-
-	tc->ht_params = mlx5e_tc_flow_ht_params;
-	return rhashtable_init(&tc->ht, &tc->ht_params);
-}
-
-static void _mlx5e_tc_del_flow(void *ptr, void *arg)
-{
-	struct mlx5e_tc_flow *flow = ptr;
-	struct mlx5e_priv *priv = arg;
-
-	mlx5e_tc_del_flow(priv, flow);
-	kfree(flow);
-}
-
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
-{
-	struct mlx5e_tc_table *tc = &priv->fs.tc;
-
-	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
-
-	if (!IS_ERR_OR_NULL(tc->t)) {
-		mlx5_destroy_flow_table(tc->t);
-		tc->t = NULL;
-	}
-}
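
A note on the structure being moved here: offloaded flows are tracked in a
rhashtable keyed by the TC filter cookie, which is what makes the lookups in
mlx5e_delete_flower() and mlx5e_stats_flower() cheap. A minimal kernel-style
sketch of the same cookie-keyed pattern (the demo_* names are illustrative,
not part of this patch):

/* Illustrative only: the cookie-keyed rhashtable pattern used by
 * mlx5e_tc_flow above, reduced to its essentials.
 */
#include <linux/rhashtable.h>
#include <linux/slab.h>

struct demo_flow {
	unsigned long cookie;		/* hash key (TC filter cookie) */
	struct rhash_head node;		/* hash table linkage */
};

static const struct rhashtable_params demo_ht_params = {
	.head_offset = offsetof(struct demo_flow, node),
	.key_offset  = offsetof(struct demo_flow, cookie),
	.key_len     = sizeof(unsigned long),
	.automatic_shrinking = true,
};

static int demo_add(struct rhashtable *ht, unsigned long cookie)
{
	struct demo_flow *flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	int err;

	if (!flow)
		return -ENOMEM;
	flow->cookie = cookie;
	err = rhashtable_insert_fast(ht, &flow->node, demo_ht_params);
	if (err)
		kfree(flow);	/* mirrors the err_free unwind above */
	return err;
}
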
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
deleted file mode 100644
index 34bf903..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_EN_TC_H__
-#define __MLX5_EN_TC_H__
-
-#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
-
-int mlx5e_tc_init(struct mlx5e_priv *priv);
-void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
-
-int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
-			   struct tc_cls_flower_offload *f);
-int mlx5e_delete_flower(struct mlx5e_priv *priv,
-			struct tc_cls_flower_offload *f);
-
-int mlx5e_stats_flower(struct mlx5e_priv *priv,
-		       struct tc_cls_flower_offload *f);
-
-static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
-{
-	return atomic_read(&priv->fs.tc.ht.nelems);
-}
-
-#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
deleted file mode 100644
index cfb6837..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include "en.h"
-
-#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
-#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
-			    MLX5E_SQ_NOPS_ROOM)
-
-void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
-{
-	struct mlx5_wq_cyc                *wq  = &sq->wq;
-
-	u16 pi = sq->pc & wq->sz_m1;
-	struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-
-	struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
-
-	memset(cseg, 0, sizeof(*cseg));
-
-	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
-	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
-
-	sq->pc++;
-	sq->stats.nop++;
-
-	if (notify_hw) {
-		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
-	}
-}
-
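
For readers following the WQE layout: the two stores above pack the low 16
bits of the producer counter together with the opcode into one big-endian
word, and the SQ number together with the data-segment count into another.
A small userspace illustration of that packing, assuming the usual
[opmod:8][idx:16][opcode:8] and [qpn:24][ds:8] layouts (values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pc = 0x1234;		/* producer counter */
	uint32_t sqn = 0x42;		/* send queue number */
	uint32_t opcode = 0x00;		/* NOP */

	/* the driver's (sq->pc << 8) relies on the be32 cast to drop
	 * the high bits; the mask here makes that explicit
	 */
	uint32_t opmod_idx_opcode = ((pc & 0xffff) << 8) | opcode;
	uint32_t qpn_ds = (sqn << 8) | 0x01;	/* one 16-byte segment */

	printf("opmod_idx_opcode=0x%08x qpn_ds=0x%08x\n",
	       opmod_idx_opcode, qpn_ds);
	return 0;
}
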
-static inline void mlx5e_tx_dma_unmap(struct device *pdev,
-				      struct mlx5e_sq_dma *dma)
-{
-	switch (dma->type) {
-	case MLX5E_DMA_MAP_SINGLE:
-		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
-		break;
-	case MLX5E_DMA_MAP_PAGE:
-		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
-		break;
-	default:
-		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
-	}
-}
-
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
-				  dma_addr_t addr,
-				  u32 size,
-				  enum mlx5e_dma_map_type map_type)
-{
-	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
-
-	sq->db.txq.dma_fifo[i].addr = addr;
-	sq->db.txq.dma_fifo[i].size = size;
-	sq->db.txq.dma_fifo[i].type = map_type;
-	sq->dma_fifo_pc++;
-}
-
-static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
-{
-	return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
-{
-	int i;
-
-	for (i = 0; i < num_dma; i++) {
-		struct mlx5e_sq_dma *last_pushed_dma =
-			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
-
-		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
-	}
-}
-
-u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
-		       void *accel_priv, select_queue_fallback_t fallback)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	int channel_ix = fallback(dev, skb);
-	int up = 0;
-
-	if (!netdev_get_num_tc(dev))
-		return channel_ix;
-
-	if (skb_vlan_tag_present(skb))
-		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
-
-	/* channel_ix can be larger than num_channels since
-	 * dev->num_real_tx_queues = num_channels * num_tc
-	 */
-	if (channel_ix >= priv->params.num_channels)
-		channel_ix = reciprocal_scale(channel_ix,
-					      priv->params.num_channels);
-
-	return priv->channeltc_to_txq_map[channel_ix][up];
-}
-
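
reciprocal_scale(), used just above, folds a full-range 32-bit value into
[0, ep_ro) with a multiply and a shift instead of a modulo. The standalone
sketch below reproduces the computation as I read the kernel helper:

#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	/* e.g. fold an oversized queue index back into 4 channels */
	printf("%u\n", reciprocal_scale(0xdeadbeef, 4));	/* 0..3 */
	return 0;
}
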
-static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
-{
-#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-
-	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
-}
-
-static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
-{
-	struct flow_keys keys;
-
-	if (skb_transport_header_was_set(skb))
-		return skb_transport_offset(skb);
-	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
-		return keys.control.thoff;
-	else
-		return mlx5e_skb_l2_header_offset(skb);
-}
-
-static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
-						 struct sk_buff *skb)
-{
-	int hlen;
-
-	switch (mode) {
-	case MLX5_INLINE_MODE_TCP_UDP:
-		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
-		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
-			hlen += VLAN_HLEN;
-		return hlen;
-	case MLX5_INLINE_MODE_IP:
-		/* When transport header is set to zero, it means no transport
-		 * header. When transport header is set to 0xff's, it means
-		 * transport header wasn't set.
-		 */
-		if (skb_transport_offset(skb))
-			return mlx5e_skb_l3_header_offset(skb);
-		/* fall through */
-	case MLX5_INLINE_MODE_L2:
-	default:
-		return mlx5e_skb_l2_header_offset(skb);
-	}
-}
-
-static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
-					    struct sk_buff *skb, bool bf)
-{
-	/* Some NIC TX decisions, e.g. loopback, are based on the packet
-	 * headers and occur before the data gather.
-	 * Therefore these headers must be copied into the WQE
-	 */
-	if (bf) {
-		u16 ihs = skb_headlen(skb);
-
-		if (skb_vlan_tag_present(skb))
-			ihs += VLAN_HLEN;
-
-		if (ihs <= sq->max_inline)
-			return skb_headlen(skb);
-	}
-	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
-}
-
-static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
-					    unsigned int *skb_len,
-					    unsigned int len)
-{
-	*skb_len -= len;
-	*skb_data += len;
-}
-
-static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
-				     unsigned char **skb_data,
-				     unsigned int *skb_len)
-{
-	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
-	int cpy1_sz = 2 * ETH_ALEN;
-	int cpy2_sz = ihs - cpy1_sz;
-
-	memcpy(vhdr, *skb_data, cpy1_sz);
-	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
-	vhdr->h_vlan_proto = skb->vlan_proto;
-	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
-	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
-	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
-}
-
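
mlx5e_insert_vlan() above splits the header copy around the inserted tag:
12 address bytes, the 4-byte 802.1Q tag, then the remaining inlined headers.
The same rewrite as a standalone sketch (insert_vlan is a hypothetical
helper, not driver code; ihs is assumed to be at least 14):

#include <stdint.h>
#include <string.h>

static void insert_vlan(uint8_t *dst, const uint8_t *pkt, int ihs,
			uint16_t tpid_be, uint16_t tci_be)
{
	memcpy(dst, pkt, 12);			/* dst MAC + src MAC */
	memcpy(dst + 12, &tpid_be, 2);		/* e.g. htons(ETH_P_8021Q) */
	memcpy(dst + 14, &tci_be, 2);		/* PCP/DEI/VID */
	memcpy(dst + 16, pkt + 12, ihs - 12);	/* EtherType onwards */
}
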
-static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-	struct mlx5_wq_cyc       *wq   = &sq->wq;
-
-	u16 pi = sq->pc & wq->sz_m1;
-	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
-	struct mlx5e_tx_wqe_info *wi   = &sq->db.txq.wqe_info[pi];
-
-	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
-	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
-	struct mlx5_wqe_data_seg *dseg;
-
-	unsigned char *skb_data = skb->data;
-	unsigned int skb_len = skb->len;
-	u8  opcode = MLX5_OPCODE_SEND;
-	dma_addr_t dma_addr = 0;
-	unsigned int num_bytes;
-	bool bf = false;
-	u16 headlen;
-	u16 ds_cnt;
-	u16 ihs;
-	int i;
-
-	memset(wqe, 0, sizeof(*wqe));
-
-	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
-		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-		if (skb->encapsulation) {
-			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
-					  MLX5_ETH_WQE_L4_INNER_CSUM;
-			sq->stats.csum_partial_inner++;
-		} else {
-			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
-		}
-	} else
-		sq->stats.csum_none++;
-
-	if (sq->cc != sq->prev_cc) {
-		sq->prev_cc = sq->cc;
-		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
-	}
-
-	if (skb_is_gso(skb)) {
-		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
-		opcode       = MLX5_OPCODE_LSO;
-
-		if (skb->encapsulation) {
-			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
-			sq->stats.tso_inner_packets++;
-			sq->stats.tso_inner_bytes += skb->len - ihs;
-		} else {
-			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
-			sq->stats.tso_packets++;
-			sq->stats.tso_bytes += skb->len - ihs;
-		}
-
-		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
-	} else {
-		bf = sq->bf_budget &&
-		     !skb->xmit_more &&
-		     !skb_shinfo(skb)->nr_frags;
-		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
-		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
-	}
-
-	wi->num_bytes = num_bytes;
-
-	if (skb_vlan_tag_present(skb)) {
-		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
-				  &skb_len);
-		ihs += VLAN_HLEN;
-	} else {
-		memcpy(eseg->inline_hdr_start, skb_data, ihs);
-		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
-	}
-
-	eseg->inline_hdr_sz = cpu_to_be16(ihs);
-
-	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
-			       MLX5_SEND_WQE_DS);
-	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
-
-	wi->num_dma = 0;
-
-	headlen = skb_len - skb->data_len;
-	if (headlen) {
-		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
-					  DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-			goto dma_unmap_wqe_err;
-
-		dseg->addr       = cpu_to_be64(dma_addr);
-		dseg->lkey       = sq->mkey_be;
-		dseg->byte_count = cpu_to_be32(headlen);
-
-		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
-		wi->num_dma++;
-
-		dseg++;
-	}
-
-	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-		int fsz = skb_frag_size(frag);
-
-		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
-					    DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
-			goto dma_unmap_wqe_err;
-
-		dseg->addr       = cpu_to_be64(dma_addr);
-		dseg->lkey       = sq->mkey_be;
-		dseg->byte_count = cpu_to_be32(fsz);
-
-		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
-		wi->num_dma++;
-
-		dseg++;
-	}
-
-	ds_cnt += wi->num_dma;
-
-	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
-	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
-
-	sq->db.txq.skb[pi] = skb;
-
-	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	sq->pc += wi->num_wqebbs;
-
-	netdev_tx_sent_queue(sq->txq, wi->num_bytes);
-
-	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-
-	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
-		netif_tx_stop_queue(sq->txq);
-		sq->stats.stopped++;
-	}
-
-	sq->stats.xmit_more += skb->xmit_more;
-	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
-		int bf_sz = 0;
-
-		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
-			bf_sz = wi->num_wqebbs << 3;
-
-		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
-		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
-	}
-
-	/* fill sq edge with nops to avoid wqe wrap around */
-	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
-		sq->db.txq.skb[pi] = NULL;
-		mlx5e_send_nop(sq, false);
-	}
-
-	if (bf)
-		sq->bf_budget--;
-
-	sq->stats.packets++;
-	sq->stats.bytes += num_bytes;
-	return NETDEV_TX_OK;
-
-dma_unmap_wqe_err:
-	sq->stats.dropped++;
-	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
-	dev_kfree_skb_any(skb);
-
-	return NETDEV_TX_OK;
-}
-
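
The descriptor accounting in mlx5e_sq_xmit() may be easier to see in
isolation: the fixed segments, plus the inlined headers rounded up to
16-byte slots, plus one slot per DMA pointer, with the total rounded up to
64-byte basic blocks. In the sketch below, base_ds and the 2 bytes of room
already inside the eth segment are assumptions for the example; 16 and 4
match MLX5_SEND_WQE_DS and MLX5_SEND_WQEBB_NUM_DS as I read them:

#include <stdio.h>

#define WQE_DS		16	/* bytes per data-segment slot */
#define WQEBB_NUM_DS	4	/* 64-byte basic block = 4 slots */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int base_ds = 3;	/* ctrl + eth segments (illustrative) */
	int ihs = 34;		/* inlined header bytes */
	int inline_room = 2;	/* bytes already inside the eth segment */
	int num_dma = 2;	/* headlen + one page fragment */

	int ds = base_ds + DIV_ROUND_UP(ihs - inline_room, WQE_DS) + num_dma;

	printf("ds=%d wqebbs=%d\n", ds, DIV_ROUND_UP(ds, WQEBB_NUM_DS));
	return 0;	/* ds=7 wqebbs=2 */
}
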
-netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-	struct mlx5e_priv *priv = netdev_priv(dev);
-	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
-
-	return mlx5e_sq_xmit(sq, skb);
-}
-
-bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
-{
-	struct mlx5e_sq *sq;
-	u32 dma_fifo_cc;
-	u32 nbytes;
-	u16 npkts;
-	u16 sqcc;
-	int i;
-
-	sq = container_of(cq, struct mlx5e_sq, cq);
-
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return false;
-
-	npkts = 0;
-	nbytes = 0;
-
-	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-	 * otherwise a cq overrun may occur
-	 */
-	sqcc = sq->cc;
-
-	/* avoid dirtying sq cache line every cqe */
-	dma_fifo_cc = sq->dma_fifo_cc;
-
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
-		u16 wqe_counter;
-		bool last_wqe;
-
-		cqe = mlx5e_get_cqe(cq);
-		if (!cqe)
-			break;
-
-		mlx5_cqwq_pop(&cq->wq);
-
-		wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
-		do {
-			struct mlx5e_tx_wqe_info *wi;
-			struct sk_buff *skb;
-			u16 ci;
-			int j;
-
-			last_wqe = (sqcc == wqe_counter);
-
-			ci = sqcc & sq->wq.sz_m1;
-			skb = sq->db.txq.skb[ci];
-			wi = &sq->db.txq.wqe_info[ci];
-
-			if (unlikely(!skb)) { /* nop */
-				sqcc++;
-				continue;
-			}
-
-			if (unlikely(skb_shinfo(skb)->tx_flags &
-				     SKBTX_HW_TSTAMP)) {
-				struct skb_shared_hwtstamps hwts = {};
-
-				mlx5e_fill_hwstamp(sq->tstamp,
-						   get_cqe_ts(cqe), &hwts);
-				skb_tstamp_tx(skb, &hwts);
-			}
-
-			for (j = 0; j < wi->num_dma; j++) {
-				struct mlx5e_sq_dma *dma =
-					mlx5e_dma_get(sq, dma_fifo_cc++);
-
-				mlx5e_tx_dma_unmap(sq->pdev, dma);
-			}
-
-			npkts++;
-			nbytes += wi->num_bytes;
-			sqcc += wi->num_wqebbs;
-			napi_consume_skb(skb, napi_budget);
-		} while (!last_wqe);
-	}
-
-	mlx5_cqwq_update_db_record(&cq->wq);
-
-	/* ensure cq space is freed before enabling more cqes */
-	wmb();
-
-	sq->dma_fifo_cc = dma_fifo_cc;
-	sq->cc = sqcc;
-
-	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
-
-	if (netif_tx_queue_stopped(sq->txq) &&
-	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
-		netif_tx_wake_queue(sq->txq);
-		sq->stats.wake++;
-	}
-
-	return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
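
The DMA fifo driven by dma_fifo_pc/dma_fifo_cc in the two functions above is
a plain power-of-two ring addressed by free-running counters; the mask, not
the counters, does the wrapping. The idiom as a tiny standalone demo:

#include <assert.h>
#include <stdint.h>

#define FIFO_SIZE 8			/* must be a power of two */
#define FIFO_MASK (FIFO_SIZE - 1)

static int fifo[FIFO_SIZE];
static uint32_t pc, cc;			/* producer/consumer counters */

static void push(int v) { fifo[pc++ & FIFO_MASK] = v; }
static int  pop(void)   { return fifo[cc++ & FIFO_MASK]; }

int main(void)
{
	push(1);
	push(2);
	assert(pop() == 1 && pop() == 2);
	return 0;
}
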
-static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
-{
-	struct mlx5e_tx_wqe_info *wi;
-	struct sk_buff *skb;
-	u16 ci;
-	int i;
-
-	while (sq->cc != sq->pc) {
-		ci = sq->cc & sq->wq.sz_m1;
-		skb = sq->db.txq.skb[ci];
-		wi = &sq->db.txq.wqe_info[ci];
-
-		if (!skb) { /* nop */
-			sq->cc++;
-			continue;
-		}
-
-		for (i = 0; i < wi->num_dma; i++) {
-			struct mlx5e_sq_dma *dma =
-				mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
-			mlx5e_tx_dma_unmap(sq->pdev, dma);
-		}
-
-		dev_kfree_skb_any(skb);
-		sq->cc += wi->num_wqebbs;
-	}
-}
-
-static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
-{
-	struct mlx5e_sq_wqe_info *wi;
-	struct mlx5e_dma_info *di;
-	u16 ci;
-
-	while (sq->cc != sq->pc) {
-		ci = sq->cc & sq->wq.sz_m1;
-		di = &sq->db.xdp.di[ci];
-		wi = &sq->db.xdp.wqe_info[ci];
-
-		if (wi->opcode == MLX5_OPCODE_NOP) {
-			sq->cc++;
-			continue;
-		}
-
-		sq->cc += wi->num_wqebbs;
-
-		mlx5e_page_release(&sq->channel->rq, di, false);
-	}
-}
-
-void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
-{
-	switch (sq->type) {
-	case MLX5E_SQ_TXQ:
-		mlx5e_free_txq_sq_descs(sq);
-		break;
-	case MLX5E_SQ_XDP:
-		mlx5e_free_xdp_sq_descs(sq);
-		break;
-	}
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
deleted file mode 100644
index e5c12a7..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "en.h"
-
-struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
-{
-	struct mlx5_cqwq *wq = &cq->wq;
-	u32 ci = mlx5_cqwq_get_ci(wq);
-	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
-	int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
-	int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
-
-	if (cqe_ownership_bit != sw_ownership_val)
-		return NULL;
-
-	/* ensure cqe content is read after cqe ownership bit */
-	rmb();
-
-	return cqe;
-}
-
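
The ownership check in mlx5e_get_cqe() is the classic CQ idiom: software
owns an entry when its ownership bit matches the parity of the queue's wrap
counter, so stale entries left over from the previous pass fail the compare.
Reduced to a sketch:

#include <stdbool.h>
#include <stdint.h>

static bool sw_owns_cqe(uint8_t op_own, uint32_t wrap_cnt)
{
	return (op_own & 1) == (wrap_cnt & 1);
}

int main(void)
{
	/* valid on this pass, stale again once the wrap count moves on */
	return (sw_owns_cqe(0x01, 1) && !sw_owns_cqe(0x01, 2)) ? 0 : 1;
}
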
-static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
-{
-	struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
-	struct mlx5_wq_cyc *wq;
-	struct mlx5_cqe64 *cqe;
-	u16 sqcc;
-
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return;
-
-	cqe = mlx5e_get_cqe(cq);
-	if (likely(!cqe))
-		return;
-
-	wq = &sq->wq;
-
-	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-	 * otherwise a cq overrun may occur
-	 */
-	sqcc = sq->cc;
-
-	do {
-		u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
-		struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
-
-		mlx5_cqwq_pop(&cq->wq);
-		sqcc += icowi->num_wqebbs;
-
-		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
-			WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
-				  cqe->op_own);
-			break;
-		}
-
-		switch (icowi->opcode) {
-		case MLX5_OPCODE_NOP:
-			break;
-		case MLX5_OPCODE_UMR:
-			mlx5e_post_rx_mpwqe(&sq->channel->rq);
-			break;
-		default:
-			WARN_ONCE(true,
-				  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
-				  icowi->opcode);
-		}
-
-	} while ((cqe = mlx5e_get_cqe(cq)));
-
-	mlx5_cqwq_update_db_record(&cq->wq);
-
-	/* ensure cq space is freed before enabling more cqes */
-	wmb();
-
-	sq->cc = sqcc;
-}
-
-static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
-{
-	struct mlx5e_sq *sq;
-	u16 sqcc;
-	int i;
-
-	sq = container_of(cq, struct mlx5e_sq, cq);
-
-	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
-		return false;
-
-	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
-	 * otherwise a cq overrun may occur
-	 */
-	sqcc = sq->cc;
-
-	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
-		struct mlx5_cqe64 *cqe;
-		u16 wqe_counter;
-		bool last_wqe;
-
-		cqe = mlx5e_get_cqe(cq);
-		if (!cqe)
-			break;
-
-		mlx5_cqwq_pop(&cq->wq);
-
-		wqe_counter = be16_to_cpu(cqe->wqe_counter);
-
-		do {
-			struct mlx5e_sq_wqe_info *wi;
-			struct mlx5e_dma_info *di;
-			u16 ci;
-
-			last_wqe = (sqcc == wqe_counter);
-
-			ci = sqcc & sq->wq.sz_m1;
-			di = &sq->db.xdp.di[ci];
-			wi = &sq->db.xdp.wqe_info[ci];
-
-			if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
-				sqcc++;
-				continue;
-			}
-
-			sqcc += wi->num_wqebbs;
-			/* Recycle RX page */
-			mlx5e_page_release(&sq->channel->rq, di, true);
-		} while (!last_wqe);
-	}
-
-	mlx5_cqwq_update_db_record(&cq->wq);
-
-	/* ensure cq space is freed before enabling more cqes */
-	wmb();
-
-	sq->cc = sqcc;
-	return (i == MLX5E_TX_CQ_POLL_BUDGET);
-}
-
-int mlx5e_napi_poll(struct napi_struct *napi, int budget)
-{
-	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
-					       napi);
-	bool busy = false;
-	int work_done;
-	int i;
-
-	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
-
-	for (i = 0; i < c->num_tc; i++)
-		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
-
-	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
-	busy |= work_done == budget;
-
-	if (c->xdp)
-		busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
-
-	mlx5e_poll_ico_cq(&c->icosq.cq);
-
-	busy |= mlx5e_post_rx_wqes(&c->rq);
-
-	if (busy)
-		return budget;
-
-	napi_complete_done(napi, work_done);
-
-	/* avoid losing completion event during/after polling cqs */
-	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
-		napi_schedule(napi);
-		return work_done;
-	}
-
-	for (i = 0; i < c->num_tc; i++)
-		mlx5e_cq_arm(&c->sq[i].cq);
-
-	if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
-		mlx5e_rx_am(&c->rq);
-
-	mlx5e_cq_arm(&c->rq.cq);
-	mlx5e_cq_arm(&c->icosq.cq);
-
-	return work_done;
-}
-
-void mlx5e_completion_event(struct mlx5_core_cq *mcq)
-{
-	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
-
-	cq->event_ctr++;
-	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
-	napi_schedule(cq->napi);
-}
-
-void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
-{
-	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
-	struct mlx5e_channel *c = cq->channel;
-	struct mlx5e_priv *priv = c->priv;
-	struct net_device *netdev = priv->netdev;
-
-	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
-		   __func__, mcq->cqn, event);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
deleted file mode 100644
index 5130d65..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ /dev/null
@@ -1,782 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-#ifdef CONFIG_MLX5_CORE_EN
-#include "eswitch.h"
-#endif
-
-enum {
-	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
-	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
-};
-
-enum {
-	MLX5_EQ_STATE_ARMED		= 0x9,
-	MLX5_EQ_STATE_FIRED		= 0xa,
-	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
-};
-
-enum {
-	MLX5_NUM_SPARE_EQE	= 0x80,
-	MLX5_NUM_ASYNC_EQE	= 0x100,
-	MLX5_NUM_CMD_EQE	= 32,
-	MLX5_NUM_PF_DRAIN	= 64,
-};
-
-enum {
-	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
-};
-
-#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
-			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
-			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
-			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
-			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
-			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
-			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
-			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
-			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
-			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
-			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
-
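
The mask above is just a 64-bit bitmap with one bit per event type, and the
same representation is handed to firmware when the EQ is created. Composing
and testing such a mask, standalone (the EV_* numbers below are placeholders,
not the real mlx5 event codes):

#include <stdint.h>
#include <stdio.h>

#define EV_CQ_ERROR	4	/* placeholder value */
#define EV_PORT_CHANGE	9	/* placeholder value */

int main(void)
{
	uint64_t mask = (1ull << EV_CQ_ERROR) | (1ull << EV_PORT_CHANGE);

	printf("port change subscribed: %d\n",
	       !!(mask & (1ull << EV_PORT_CHANGE)));
	return 0;
}
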
-struct map_eq_in {
-	u64	mask;
-	u32	reserved;
-	u32	unmap_eqn;
-};
-
-struct cre_des_eq {
-	u8	reserved[15];
-	u8	eqn;
-};
-
-static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
-{
-	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};
-
-	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
-	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
-{
-	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
-}
-
-static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
-{
-	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
-
-	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
-}
-
-static const char *eqe_type_str(u8 type)
-{
-	switch (type) {
-	case MLX5_EVENT_TYPE_COMP:
-		return "MLX5_EVENT_TYPE_COMP";
-	case MLX5_EVENT_TYPE_PATH_MIG:
-		return "MLX5_EVENT_TYPE_PATH_MIG";
-	case MLX5_EVENT_TYPE_COMM_EST:
-		return "MLX5_EVENT_TYPE_COMM_EST";
-	case MLX5_EVENT_TYPE_SQ_DRAINED:
-		return "MLX5_EVENT_TYPE_SQ_DRAINED";
-	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
-		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
-	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
-		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
-	case MLX5_EVENT_TYPE_CQ_ERROR:
-		return "MLX5_EVENT_TYPE_CQ_ERROR";
-	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
-		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
-	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
-		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
-	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
-	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
-		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
-	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
-		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
-	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
-		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
-	case MLX5_EVENT_TYPE_PORT_CHANGE:
-		return "MLX5_EVENT_TYPE_PORT_CHANGE";
-	case MLX5_EVENT_TYPE_GPIO_EVENT:
-		return "MLX5_EVENT_TYPE_GPIO_EVENT";
-	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
-		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
-	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
-		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
-	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
-		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
-	case MLX5_EVENT_TYPE_STALL_EVENT:
-		return "MLX5_EVENT_TYPE_STALL_EVENT";
-	case MLX5_EVENT_TYPE_CMD:
-		return "MLX5_EVENT_TYPE_CMD";
-	case MLX5_EVENT_TYPE_PAGE_REQUEST:
-		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
-	case MLX5_EVENT_TYPE_PAGE_FAULT:
-		return "MLX5_EVENT_TYPE_PAGE_FAULT";
-	default:
-		return "Unrecognized event";
-	}
-}
-
-static enum mlx5_dev_event port_subtype_event(u8 subtype)
-{
-	switch (subtype) {
-	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
-		return MLX5_DEV_EVENT_PORT_DOWN;
-	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-		return MLX5_DEV_EVENT_PORT_UP;
-	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
-		return MLX5_DEV_EVENT_PORT_INITIALIZED;
-	case MLX5_PORT_CHANGE_SUBTYPE_LID:
-		return MLX5_DEV_EVENT_LID_CHANGE;
-	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
-		return MLX5_DEV_EVENT_PKEY_CHANGE;
-	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
-		return MLX5_DEV_EVENT_GUID_CHANGE;
-	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
-		return MLX5_DEV_EVENT_CLIENT_REREG;
-	}
-	return -1;
-}
-
-static void eq_update_ci(struct mlx5_eq *eq, int arm)
-{
-	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
-	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
-	__raw_writel((__force u32) cpu_to_be32(val), addr);
-	/* We still want ordering, just not swabbing, so add a barrier */
-	mb();
-}
-
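
eq_update_ci() packs the consumer index and the EQ number into a single
doorbell word: low 24 bits for the index, top byte for the EQ. Standalone
(the driver additionally byte-swaps the value before the MMIO write):

#include <stdint.h>
#include <stdio.h>

static uint32_t eq_doorbell_val(uint32_t cons_index, uint8_t eqn)
{
	return (cons_index & 0xffffff) | ((uint32_t)eqn << 24);
}

int main(void)
{
	printf("0x%08x\n", eq_doorbell_val(0x1abcdef, 7)); /* 0x07abcdef */
	return 0;
}
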
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-static void eqe_pf_action(struct work_struct *work)
-{
-	struct mlx5_pagefault *pfault = container_of(work,
-						     struct mlx5_pagefault,
-						     work);
-	struct mlx5_eq *eq = pfault->eq;
-
-	mlx5_core_page_fault(eq->dev, pfault);
-	mempool_free(pfault, eq->pf_ctx.pool);
-}
-
-static void eq_pf_process(struct mlx5_eq *eq)
-{
-	struct mlx5_core_dev *dev = eq->dev;
-	struct mlx5_eqe_page_fault *pf_eqe;
-	struct mlx5_pagefault *pfault;
-	struct mlx5_eqe *eqe;
-	int set_ci = 0;
-
-	while ((eqe = next_eqe_sw(eq))) {
-		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
-		if (!pfault) {
-			schedule_work(&eq->pf_ctx.work);
-			break;
-		}
-
-		dma_rmb();
-		pf_eqe = &eqe->data.page_fault;
-		pfault->event_subtype = eqe->sub_type;
-		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
-
-		mlx5_core_dbg(dev,
-			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
-			      eqe->sub_type, pfault->bytes_committed);
-
-		switch (eqe->sub_type) {
-		case MLX5_PFAULT_SUBTYPE_RDMA:
-			/* RDMA based event */
-			pfault->type =
-				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
-			pfault->token =
-				be32_to_cpu(pf_eqe->rdma.pftype_token) &
-				MLX5_24BIT_MASK;
-			pfault->rdma.r_key =
-				be32_to_cpu(pf_eqe->rdma.r_key);
-			pfault->rdma.packet_size =
-				be16_to_cpu(pf_eqe->rdma.packet_length);
-			pfault->rdma.rdma_op_len =
-				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
-			pfault->rdma.rdma_va =
-				be64_to_cpu(pf_eqe->rdma.rdma_va);
-			mlx5_core_dbg(dev,
-				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
-				      pfault->type, pfault->token,
-				      pfault->rdma.r_key);
-			mlx5_core_dbg(dev,
-				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
-				      pfault->rdma.rdma_op_len,
-				      pfault->rdma.rdma_va);
-			break;
-
-		case MLX5_PFAULT_SUBTYPE_WQE:
-			/* WQE based event */
-			pfault->type =
-				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
-			pfault->token =
-				be32_to_cpu(pf_eqe->wqe.token);
-			pfault->wqe.wq_num =
-				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
-				MLX5_24BIT_MASK;
-			pfault->wqe.wqe_index =
-				be16_to_cpu(pf_eqe->wqe.wqe_index);
-			pfault->wqe.packet_size =
-				be16_to_cpu(pf_eqe->wqe.packet_length);
-			mlx5_core_dbg(dev,
-				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
-				      pfault->type, pfault->token,
-				      pfault->wqe.wq_num,
-				      pfault->wqe.wqe_index);
-			break;
-
-		default:
-			mlx5_core_warn(dev,
-				       "Unsupported page fault event sub-type: 0x%02hhx\n",
-				       eqe->sub_type);
-			/* Unsupported page faults should still be
-			 * resolved by the page fault handler
-			 */
-		}
-
-		pfault->eq = eq;
-		INIT_WORK(&pfault->work, eqe_pf_action);
-		queue_work(eq->pf_ctx.wq, &pfault->work);
-
-		++eq->cons_index;
-		++set_ci;
-
-		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
-			eq_update_ci(eq, 0);
-			set_ci = 0;
-		}
-	}
-
-	eq_update_ci(eq, 1);
-}
-
-static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
-{
-	struct mlx5_eq *eq = eq_ptr;
-	unsigned long flags;
-
-	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
-		eq_pf_process(eq);
-		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
-	} else {
-		schedule_work(&eq->pf_ctx.work);
-	}
-
-	return IRQ_HANDLED;
-}
-
-/* mempool_refill() was proposed but unfortunately wasn't accepted
- * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
- * Chip workaround.
- */
-static void mempool_refill(mempool_t *pool)
-{
-	while (pool->curr_nr < pool->min_nr)
-		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
-}
-
-static void eq_pf_action(struct work_struct *work)
-{
-	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
-
-	mempool_refill(eq->pf_ctx.pool);
-
-	spin_lock_irq(&eq->pf_ctx.lock);
-	eq_pf_process(eq);
-	spin_unlock_irq(&eq->pf_ctx.lock);
-}
-
-static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
-{
-	spin_lock_init(&pf_ctx->lock);
-	INIT_WORK(&pf_ctx->work, eq_pf_action);
-
-	pf_ctx->wq = alloc_ordered_workqueue(name,
-					     WQ_MEM_RECLAIM);
-	if (!pf_ctx->wq)
-		return -ENOMEM;
-
-	pf_ctx->pool = mempool_create_kmalloc_pool
-		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
-	if (!pf_ctx->pool)
-		goto err_wq;
-
-	return 0;
-err_wq:
-	destroy_workqueue(pf_ctx->wq);
-	return -ENOMEM;
-}
-
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
-				u32 wq_num, u8 type, int error)
-{
-	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
-
-	MLX5_SET(page_fault_resume_in, in, opcode,
-		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
-	MLX5_SET(page_fault_resume_in, in, error, !!error);
-	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
-	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
-	MLX5_SET(page_fault_resume_in, in, token, token);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
-#endif
-
-static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
-{
-	struct mlx5_eq *eq = eq_ptr;
-	struct mlx5_core_dev *dev = eq->dev;
-	struct mlx5_eqe *eqe;
-	int set_ci = 0;
-	u32 cqn = -1;
-	u32 rsn;
-	u8 port;
-
-	while ((eqe = next_eqe_sw(eq))) {
-		/*
-		 * Make sure we read EQ entry contents after we've
-		 * checked the ownership bit.
-		 */
-		dma_rmb();
-
-		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
-			      eq->eqn, eqe_type_str(eqe->type));
-		switch (eqe->type) {
-		case MLX5_EVENT_TYPE_COMP:
-			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
-			mlx5_cq_completion(dev, cqn);
-			break;
-
-		case MLX5_EVENT_TYPE_PATH_MIG:
-		case MLX5_EVENT_TYPE_COMM_EST:
-		case MLX5_EVENT_TYPE_SQ_DRAINED:
-		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
-		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
-		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
-		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
-		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
-			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
-			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
-			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
-				      eqe_type_str(eqe->type), eqe->type, rsn);
-			mlx5_rsc_event(dev, rsn, eqe->type);
-			break;
-
-		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
-		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
-			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
-			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
-				      eqe_type_str(eqe->type), eqe->type, rsn);
-			mlx5_srq_event(dev, rsn, eqe->type);
-			break;
-
-		case MLX5_EVENT_TYPE_CMD:
-			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
-			break;
-
-		case MLX5_EVENT_TYPE_PORT_CHANGE:
-			port = (eqe->data.port.port >> 4) & 0xf;
-			switch (eqe->sub_type) {
-			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
-			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-			case MLX5_PORT_CHANGE_SUBTYPE_LID:
-			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
-			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
-			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
-			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
-				if (dev->event)
-					dev->event(dev, port_subtype_event(eqe->sub_type),
-						   (unsigned long)port);
-				break;
-			default:
-				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
-					       port, eqe->sub_type);
-			}
-			break;
-		case MLX5_EVENT_TYPE_CQ_ERROR:
-			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
-			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
-				       cqn, eqe->data.cq_err.syndrome);
-			mlx5_cq_event(dev, cqn, eqe->type);
-			break;
-
-		case MLX5_EVENT_TYPE_PAGE_REQUEST:
-			{
-				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
-				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
-
-				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
-					      func_id, npages);
-				mlx5_core_req_pages_handler(dev, func_id, npages);
-			}
-			break;
-
-#ifdef CONFIG_MLX5_CORE_EN
-		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
-			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
-			break;
-#endif
-
-		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
-			mlx5_port_module_event(dev, eqe);
-			break;
-
-		default:
-			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
-				       eqe->type, eq->eqn);
-			break;
-		}
-
-		++eq->cons_index;
-		++set_ci;
-
-		/* The HCA will think the queue has overflowed if we
-		 * don't tell it we've been processing events.  We
-		 * create our EQs with MLX5_NUM_SPARE_EQE extra
-		 * entries, so we must update our consumer index at
-		 * least that often.
-		 */
-		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
-			eq_update_ci(eq, 0);
-			set_ci = 0;
-		}
-	}
-
-	eq_update_ci(eq, 1);
-
-	if (cqn != -1)
-		tasklet_schedule(&eq->tasklet_ctx.task);
-
-	return IRQ_HANDLED;
-}
-
-static void init_eq_buf(struct mlx5_eq *eq)
-{
-	struct mlx5_eqe *eqe;
-	int i;
-
-	for (i = 0; i < eq->nent; i++) {
-		eqe = get_eqe(eq, i);
-		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
-	}
-}
-
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-		       int nent, u64 mask, const char *name,
-		       enum mlx5_eq_type type)
-{
-	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
-	struct mlx5_priv *priv = &dev->priv;
-	irq_handler_t handler;
-	__be64 *pas;
-	void *eqc;
-	int inlen;
-	u32 *in;
-	int err;
-
-	eq->type = type;
-	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
-	eq->cons_index = 0;
-	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
-	if (err)
-		return err;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (type == MLX5_EQ_TYPE_PF)
-		handler = mlx5_eq_pf_int;
-	else
-#endif
-		handler = mlx5_eq_int;
-
-	init_eq_buf(eq);
-
-	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
-		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		err = -ENOMEM;
-		goto err_buf;
-	}
-
-	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
-	mlx5_fill_page_array(&eq->buf, pas);
-
-	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
-	MLX5_SET64(create_eq_in, in, event_bitmask, mask);
-
-	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
-	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
-	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
-	MLX5_SET(eqc, eqc, intr, vecidx);
-	MLX5_SET(eqc, eqc, log_page_size,
-		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (err)
-		goto err_in;
-
-	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
-		 name, pci_name(dev->pdev));
-
-	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
-	eq->irqn = priv->msix_arr[vecidx].vector;
-	eq->dev = dev;
-	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
-	err = request_irq(eq->irqn, handler, 0,
-			  priv->irq_info[vecidx].name, eq);
-	if (err)
-		goto err_eq;
-
-	err = mlx5_debug_eq_add(dev, eq);
-	if (err)
-		goto err_irq;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (type == MLX5_EQ_TYPE_PF) {
-		err = init_pf_ctx(&eq->pf_ctx, name);
-		if (err)
-			goto err_irq;
-	} else
-#endif
-	{
-		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
-		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
-		spin_lock_init(&eq->tasklet_ctx.lock);
-		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
-			     (unsigned long)&eq->tasklet_ctx);
-	}
-
-	/* EQs are created in ARMED state
-	 */
-	eq_update_ci(eq, 1);
-
-	kvfree(in);
-	return 0;
-
-err_irq:
-	free_irq(priv->msix_arr[vecidx].vector, eq);
-
-err_eq:
-	mlx5_cmd_destroy_eq(dev, eq->eqn);
-
-err_in:
-	kvfree(in);
-
-err_buf:
-	mlx5_buf_free(dev, &eq->buf);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
-
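
EQ sizing in mlx5_create_map_eq() is: requested entries plus the spare
headroom, rounded up to a power of two, then expressed to firmware as a
log2. Worked through standalone (0x80 matches MLX5_NUM_SPARE_EQE above;
__builtin_ctz stands in for ilog2 on a power of two):

#include <stdint.h>
#include <stdio.h>

static uint32_t roundup_pow_of_two32(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

int main(void)
{
	uint32_t nent = roundup_pow_of_two32(256 + 0x80);	/* 512 */
	int log_eq_size = __builtin_ctz(nent);			/* 9 */

	printf("nent=%u log_eq_size=%d\n", nent, log_eq_size);
	return 0;
}
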
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
-{
-	int err;
-
-	mlx5_debug_eq_remove(dev, eq);
-	free_irq(eq->irqn, eq);
-	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
-	if (err)
-		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
-			       eq->eqn);
-	synchronize_irq(eq->irqn);
-
-	if (eq->type == MLX5_EQ_TYPE_COMP) {
-		tasklet_disable(&eq->tasklet_ctx.task);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	} else if (eq->type == MLX5_EQ_TYPE_PF) {
-		cancel_work_sync(&eq->pf_ctx.work);
-		destroy_workqueue(eq->pf_ctx.wq);
-		mempool_destroy(eq->pf_ctx.pool);
-#endif
-	}
-	mlx5_buf_free(dev, &eq->buf);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
-
-u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
-{
-	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
-}
-
-int mlx5_eq_init(struct mlx5_core_dev *dev)
-{
-	int err;
-
-	spin_lock_init(&dev->priv.eq_table.lock);
-
-	err = mlx5_eq_debugfs_init(dev);
-
-	return err;
-}
-
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
-{
-	mlx5_eq_debugfs_cleanup(dev);
-}
-
-int mlx5_start_eqs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
-	int err;
-
-	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
-	    MLX5_CAP_GEN(dev, vport_group_manager) &&
-	    mlx5_core_is_pf(dev))
-		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
-
-	if (MLX5_CAP_GEN(dev, port_module_event))
-		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
-	else
-		mlx5_core_dbg(dev, "port_module_event is not set\n");
-
-	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
-				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
-				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
-		return err;
-	}
-
-	mlx5_cmd_use_events(dev);
-
-	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
-				 MLX5_NUM_ASYNC_EQE, async_event_mask,
-				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
-		goto err1;
-	}
-
-	err = mlx5_create_map_eq(dev, &table->pages_eq,
-				 MLX5_EQ_VEC_PAGES,
-				 /* TODO: sriov max_vf + */ 1,
-				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
-				 MLX5_EQ_TYPE_ASYNC);
-	if (err) {
-		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
-		goto err2;
-	}
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (MLX5_CAP_GEN(dev, pg)) {
-		err = mlx5_create_map_eq(dev, &table->pfault_eq,
-					 MLX5_EQ_VEC_PFAULT,
-					 MLX5_NUM_ASYNC_EQE,
-					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
-					 "mlx5_page_fault_eq",
-					 MLX5_EQ_TYPE_PF);
-		if (err) {
-			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
-				       err);
-			goto err3;
-		}
-	}
-
-	return err;
-err3:
-	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
-#else
-	return err;
-#endif
-
-err2:
-	mlx5_destroy_unmap_eq(dev, &table->async_eq);
-
-err1:
-	mlx5_cmd_use_polling(dev);
-	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
-	return err;
-}
-
-int mlx5_stop_eqs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	int err;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (MLX5_CAP_GEN(dev, pg)) {
-		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
-		if (err)
-			return err;
-	}
-#endif
-
-	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
-	if (err)
-		return err;
-
-	mlx5_destroy_unmap_eq(dev, &table->async_eq);
-	mlx5_cmd_use_polling(dev);
-
-	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
-	if (err)
-		mlx5_cmd_use_events(dev);
-
-	return err;
-}
-
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
-
-	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
-	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
deleted file mode 100644
index d6807c3..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ /dev/null
@@ -1,2133 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/etherdevice.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#include <linux/mlx5/vport.h>
-#include <linux/mlx5/fs.h>
-#include "mlx5_core.h"
-#include "eswitch.h"
-
-#define UPLINK_VPORT 0xFFFF
-
-enum {
-	MLX5_ACTION_NONE = 0,
-	MLX5_ACTION_ADD  = 1,
-	MLX5_ACTION_DEL  = 2,
-};
-
-/* E-Switch UC L2 table hash node */
-struct esw_uc_addr {
-	struct l2addr_node node;
-	u32                table_index;
-	u32                vport;
-};
-
-/* E-Switch MC FDB table hash node */
-struct esw_mc_addr { /* SRIOV only */
-	struct l2addr_node     node;
-	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
-	u32                    refcnt;
-};
-
-/* Vport UC/MC hash node */
-struct vport_addr {
-	struct l2addr_node     node;
-	u8                     action;
-	u32                    vport;
-	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
-	/* A flag indicating that mac was added due to mc promiscuous vport */
-	bool mc_promisc;
-};
-
-enum {
-	UC_ADDR_CHANGE = BIT(0),
-	MC_ADDR_CHANGE = BIT(1),
-	PROMISC_CHANGE = BIT(3),
-};
-
-/* Vport context events */
-#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
-			    MC_ADDR_CHANGE | \
-			    PROMISC_CHANGE)
-
-static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
-					u32 events_mask)
-{
-	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
-	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
-	void *nic_vport_ctx;
-
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
-	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
-				     in, nic_vport_context);
-
-	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
-
-	if (events_mask & UC_ADDR_CHANGE)
-		MLX5_SET(nic_vport_context, nic_vport_ctx,
-			 event_on_uc_address_change, 1);
-	if (events_mask & MC_ADDR_CHANGE)
-		MLX5_SET(nic_vport_context, nic_vport_ctx,
-			 event_on_mc_address_change, 1);
-	if (events_mask & PROMISC_CHANGE)
-		MLX5_SET(nic_vport_context, nic_vport_ctx,
-			 event_on_promisc_change, 1);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-/* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
-					void *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
-
-	MLX5_SET(modify_esw_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
-	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
-				  u16 vlan, u8 qos, u8 set_flags)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
-
-	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
-	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
-		return -ENOTSUPP;
-
-	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
-		  vport, vlan, qos, set_flags);
-
-	if (set_flags & SET_VLAN_STRIP)
-		MLX5_SET(modify_esw_vport_context_in, in,
-			 esw_vport_context.vport_cvlan_strip, 1);
-
-	if (set_flags & SET_VLAN_INSERT) {
-		/* insert only if no vlan in packet */
-		MLX5_SET(modify_esw_vport_context_in, in,
-			 esw_vport_context.vport_cvlan_insert, 1);
-
-		MLX5_SET(modify_esw_vport_context_in, in,
-			 esw_vport_context.cvlan_pcp, qos);
-		MLX5_SET(modify_esw_vport_context_in, in,
-			 esw_vport_context.cvlan_id, vlan);
-	}
-
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 field_select.vport_cvlan_strip, 1);
-	MLX5_SET(modify_esw_vport_context_in, in,
-		 field_select.vport_cvlan_insert, 1);
-
-	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
-}
-
-/* HW L2 Table (MPFS) management */
-static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
-				  u8 *mac, u8 vlan_valid, u16 vlan)
-{
-	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
-	u8 *in_mac_addr;
-
-	MLX5_SET(set_l2_table_entry_in, in, opcode,
-		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
-	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
-	MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
-	MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
-
-	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
-	ether_addr_copy(&in_mac_addr[2], mac);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
-{
-	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
-
-	MLX5_SET(delete_l2_table_entry_in, in, opcode,
-		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
-	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
-{
-	int err = 0;
-
-	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
-	if (*ix >= l2_table->size)
-		err = -ENOSPC;
-	else
-		__set_bit(*ix, l2_table->bitmap);
-
-	return err;
-}
-
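
alloc_l2_table_index() above is a classic bitmap allocator: first clear bit
wins, -ENOSPC when the table is full. The same scheme as a userspace sketch
(a single 64-bit word stands in for the driver's bitmap):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 64

static uint64_t bitmap;

static int alloc_index(uint32_t *ix)
{
	for (uint32_t i = 0; i < TABLE_SIZE; i++) {
		if (!(bitmap & (1ull << i))) {
			bitmap |= 1ull << i;
			*ix = i;
			return 0;
		}
	}
	return -1;	/* the driver returns -ENOSPC here */
}

static void free_index(uint32_t ix)
{
	bitmap &= ~(1ull << ix);
}

int main(void)
{
	uint32_t a, b;

	alloc_index(&a);	/* 0 */
	alloc_index(&b);	/* 1 */
	free_index(a);
	alloc_index(&a);	/* lowest free index is reused: 0 */
	printf("a=%u b=%u\n", a, b);
	return 0;
}
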
-static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
-{
-	__clear_bit(ix, l2_table->bitmap);
-}
-
-static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
-			      u8 vlan_valid, u16 vlan,
-			      u32 *index)
-{
-	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
-	int err;
-
-	err = alloc_l2_table_index(l2_table, index);
-	if (err)
-		return err;
-
-	err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
-	if (err)
-		free_l2_table_index(l2_table, *index);
-
-	return err;
-}
-
-static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
-{
-	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
-
-	del_l2_table_entry_cmd(dev, index);
-	free_l2_table_index(l2_table, index);
-}
-
-/* E-Switch FDB */
-static struct mlx5_flow_handle *
-__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
-			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
-{
-	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
-			    MLX5_MATCH_OUTER_HEADERS);
-	struct mlx5_flow_handle *flow_rule = NULL;
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_spec *spec;
-	void *mv_misc = NULL;
-	void *mc_misc = NULL;
-	u8 *dmac_v = NULL;
-	u8 *dmac_c = NULL;
-
-	if (rx_rule)
-		match_header |= MLX5_MATCH_MISC_PARAMETERS;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
-		return NULL;
-	}
-	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-			      outer_headers.dmac_47_16);
-	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-			      outer_headers.dmac_47_16);
-
-	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
-		ether_addr_copy(dmac_v, mac_v);
-		ether_addr_copy(dmac_c, mac_c);
-	}
-
-	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
-		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
-					misc_parameters);
-		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-					misc_parameters);
-		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
-		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
-	}
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-	dest.vport_num = vport;
-
-	esw_debug(esw->dev,
-		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
-		  dmac_v, dmac_c, vport);
-	spec->match_criteria_enable = match_header;
-	flow_act.action =  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	flow_rule =
-		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
-				    &flow_act, &dest, 1);
-	if (IS_ERR(flow_rule)) {
-		esw_warn(esw->dev,
-			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
-			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
-		flow_rule = NULL;
-	}
-
-	kvfree(spec);
-	return flow_rule;
-}
-
-static struct mlx5_flow_handle *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
-{
-	u8 mac_c[ETH_ALEN];
-
-	eth_broadcast_addr(mac_c);
-	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
-}
-
-static struct mlx5_flow_handle *
-esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
-{
-	u8 mac_c[ETH_ALEN];
-	u8 mac_v[ETH_ALEN];
-
-	eth_zero_addr(mac_c);
-	eth_zero_addr(mac_v);
-	mac_c[0] = 0x01;
-	mac_v[0] = 0x01;
-	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
-}
-
-static struct mlx5_flow_handle *
-esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
-{
-	u8 mac_c[ETH_ALEN];
-	u8 mac_v[ETH_ALEN];
-
-	eth_zero_addr(mac_c);
-	eth_zero_addr(mac_v);
-	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
-}
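
The three wrappers above differ only in the mask/value pair they hand to
__esw_fdb_set_vport_rule(): an all-ones DMAC mask for exact per-MAC
forwarding, a mask covering just the Ethernet group/multicast bit for
allmulti, and an empty DMAC mask plus a misc source_port match for the
promisc rule. A userspace sketch of how such a masked compare behaves
(illustration only, not part of this patch):

	#include <stdio.h>

	/* a DMAC hits a rule when (dmac & mask) == (value & mask) */
	static int masked_match(const unsigned char *dmac,
				const unsigned char *mask,
				const unsigned char *value)
	{
		int i;

		for (i = 0; i < 6; i++)
			if ((dmac[i] & mask[i]) != (value[i] & mask[i]))
				return 0;
		return 1;
	}

	int main(void)
	{
		/* allmulti: match only the group bit of the DMAC */
		unsigned char mask[6]  = { 0x01, 0, 0, 0, 0, 0 };
		unsigned char value[6] = { 0x01, 0, 0, 0, 0, 0 };
		unsigned char mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		unsigned char ucast[6] = { 0x24, 0x8a, 0x07, 0x11, 0x22, 0x33 };

		printf("mcast hits: %d\n", masked_match(mcast, mask, value)); /* 1 */
		printf("ucast hits: %d\n", masked_match(ucast, mask, value)); /* 0 */
		return 0;
	}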
-
-static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *fdb;
-	struct mlx5_flow_group *g;
-	void *match_criteria;
-	int table_size;
-	u32 *flow_group_in;
-	u8 *dmac;
-	int err = 0;
-
-	esw_debug(dev, "Create FDB log_max_size(%d)\n",
-		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		return -EOPNOTSUPP;
-	}
-
-	flow_group_in = mlx5_vzalloc(inlen);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
-	if (IS_ERR(fdb)) {
-		err = PTR_ERR(fdb);
-		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
-		goto out;
-	}
-	esw->fdb_table.fdb = fdb;
-
-	/* Addresses group: full-match unicast/multicast addresses */
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_OUTER_HEADERS);
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	/* Reserve 2 entries for the allmulti and promisc rules */
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
-	eth_broadcast_addr(dmac);
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
-		goto out;
-	}
-	esw->fdb_table.legacy.addr_grp = g;
-
-	/* Allmulti group: one rule that forwards any mcast traffic */
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
-	eth_zero_addr(dmac);
-	dmac[0] = 0x01;
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
-		goto out;
-	}
-	esw->fdb_table.legacy.allmulti_grp = g;
-
-	/* Promiscuous group:
-	 * One rule that forwards all traffic unmatched by the previous groups
-	 */
-	eth_zero_addr(dmac);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_MISC_PARAMETERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
-		goto out;
-	}
-	esw->fdb_table.legacy.promisc_grp = g;
-
-out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-			esw->fdb_table.legacy.allmulti_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
-			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-			esw->fdb_table.legacy.addr_grp = NULL;
-		}
-		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
-			mlx5_destroy_flow_table(esw->fdb_table.fdb);
-			esw->fdb_table.fdb = NULL;
-		}
-	}
-
-	kvfree(flow_group_in);
-	return err;
-}
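
For reference, the flow-index layout programmed above: the full-match
address group takes every entry except the last two, which are reserved
for the single allmulti rule and the catch-all promisc rule. A tiny
sketch, assuming a log_max_ft_size of 6 (illustration only):

	#include <stdio.h>

	int main(void)
	{
		int log_max_ft_size = 6;	/* assumed capability value */
		int table_size = 1 << log_max_ft_size;

		printf("addr group:     [0..%d]\n", table_size - 3);
		printf("allmulti rule:  [%d]\n", table_size - 2);
		printf("promisc rule:   [%d]\n", table_size - 1);
		return 0;
	}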
-
-static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
-{
-	if (!esw->fdb_table.fdb)
-		return;
-
-	esw_debug(esw->dev, "Destroy FDB Table\n");
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
-	esw->fdb_table.fdb = NULL;
-	esw->fdb_table.legacy.addr_grp = NULL;
-	esw->fdb_table.legacy.allmulti_grp = NULL;
-	esw->fdb_table.legacy.promisc_grp = NULL;
-}
-
-/* E-Switch vport UC/MC lists management */
-typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
-				 struct vport_addr *vaddr);
-
-static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
-{
-	struct hlist_head *hash = esw->l2_table.l2_hash;
-	struct esw_uc_addr *esw_uc;
-	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
-	int err;
-
-	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
-	if (esw_uc) {
-		esw_warn(esw->dev,
-			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
-			 mac, vport, esw_uc->vport);
-		return -EEXIST;
-	}
-
-	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
-	if (!esw_uc)
-		return -ENOMEM;
-	esw_uc->vport = vport;
-
-	err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
-	if (err)
-		goto abort;
-
-	/* SRIOV is enabled: Forward UC MAC to vport */
-	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
-		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
-
-	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
-		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
-	return err;
-abort:
-	l2addr_hash_del(esw_uc);
-	return err;
-}
-
-static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
-{
-	struct hlist_head *hash = esw->l2_table.l2_hash;
-	struct esw_uc_addr *esw_uc;
-	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
-
-	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
-	if (!esw_uc || esw_uc->vport != vport) {
-		esw_debug(esw->dev,
-			  "MAC(%pM) doesn't belong to vport (%d)\n",
-			  mac, vport);
-		return -EINVAL;
-	}
-	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
-		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
-
-	del_l2_table_entry(esw->dev, esw_uc->table_index);
-
-	if (vaddr->flow_rule)
-		mlx5_del_flow_rules(vaddr->flow_rule);
-	vaddr->flow_rule = NULL;
-
-	l2addr_hash_del(esw_uc);
-	return 0;
-}
-
-static void update_allmulti_vports(struct mlx5_eswitch *esw,
-				   struct vport_addr *vaddr,
-				   struct esw_mc_addr *esw_mc)
-{
-	u8 *mac = vaddr->node.addr;
-	u32 vport_idx = 0;
-
-	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
-		struct mlx5_vport *vport = &esw->vports[vport_idx];
-		struct hlist_head *vport_hash = vport->mc_list;
-		struct vport_addr *iter_vaddr =
-					l2addr_hash_find(vport_hash,
-							 mac,
-							 struct vport_addr);
-		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
-		    vaddr->vport == vport_idx)
-			continue;
-		switch (vaddr->action) {
-		case MLX5_ACTION_ADD:
-			if (iter_vaddr)
-				continue;
-			iter_vaddr = l2addr_hash_add(vport_hash, mac,
-						     struct vport_addr,
-						     GFP_KERNEL);
-			if (!iter_vaddr) {
-				esw_warn(esw->dev,
-					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
-					 mac, vport_idx);
-				continue;
-			}
-			iter_vaddr->vport = vport_idx;
-			iter_vaddr->flow_rule =
-					esw_fdb_set_vport_rule(esw,
-							       mac,
-							       vport_idx);
-			iter_vaddr->mc_promisc = true;
-			break;
-		case MLX5_ACTION_DEL:
-			if (!iter_vaddr)
-				continue;
-			mlx5_del_flow_rules(iter_vaddr->flow_rule);
-			l2addr_hash_del(iter_vaddr);
-			break;
-		}
-	}
-}
-
-static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
-{
-	struct hlist_head *hash = esw->mc_table;
-	struct esw_mc_addr *esw_mc;
-	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
-
-	if (!esw->fdb_table.fdb)
-		return 0;
-
-	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
-	if (esw_mc)
-		goto add;
-
-	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
-	if (!esw_mc)
-		return -ENOMEM;
-
-	/* Forward MC MAC to uplink */
-	esw_mc->uplink_rule =
-		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
-
-	/* Add this multicast mac to all the mc promiscuous vports */
-	update_allmulti_vports(esw, vaddr, esw_mc);
-
-add:
-	/* If the multicast mac was added on behalf of an mc promiscuous
-	 * vport, don't increment the multicast ref count.
-	 */
-	if (!vaddr->mc_promisc)
-		esw_mc->refcnt++;
-
-	/* Forward MC MAC to vport */
-	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
-	esw_debug(esw->dev,
-		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
-		  vport, mac, vaddr->flow_rule,
-		  esw_mc->refcnt, esw_mc->uplink_rule);
-	return 0;
-}
-
-static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
-{
-	struct hlist_head *hash = esw->mc_table;
-	struct esw_mc_addr *esw_mc;
-	u8 *mac = vaddr->node.addr;
-	u32 vport = vaddr->vport;
-
-	if (!esw->fdb_table.fdb)
-		return 0;
-
-	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
-	if (!esw_mc) {
-		esw_warn(esw->dev,
-			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
-			 mac, vport);
-		return -EINVAL;
-	}
-	esw_debug(esw->dev,
-		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
-		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
-		  esw_mc->uplink_rule);
-
-	if (vaddr->flow_rule)
-		mlx5_del_flow_rules(vaddr->flow_rule);
-	vaddr->flow_rule = NULL;
-
-	/* If the multicast mac was added on behalf of an mc promiscuous
-	 * vport, don't decrement the multicast ref count.
-	 */
-	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
-		return 0;
-
-	/* Remove this multicast mac from all the mc promiscuous vports */
-	update_allmulti_vports(esw, vaddr, esw_mc);
-
-	if (esw_mc->uplink_rule)
-		mlx5_del_flow_rules(esw_mc->uplink_rule);
-
-	l2addr_hash_del(esw_mc);
-	return 0;
-}
-
-/* Apply vport UC/MC list to HW l2 table and FDB table */
-static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
-				      u32 vport_num, int list_type)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
-	vport_addr_action vport_addr_add;
-	vport_addr_action vport_addr_del;
-	struct vport_addr *addr;
-	struct l2addr_node *node;
-	struct hlist_head *hash;
-	struct hlist_node *tmp;
-	int hi;
-
-	vport_addr_add = is_uc ? esw_add_uc_addr :
-				 esw_add_mc_addr;
-	vport_addr_del = is_uc ? esw_del_uc_addr :
-				 esw_del_mc_addr;
-
-	hash = is_uc ? vport->uc_list : vport->mc_list;
-	for_each_l2hash_node(node, tmp, hash, hi) {
-		addr = container_of(node, struct vport_addr, node);
-		switch (addr->action) {
-		case MLX5_ACTION_ADD:
-			vport_addr_add(esw, addr);
-			addr->action = MLX5_ACTION_NONE;
-			break;
-		case MLX5_ACTION_DEL:
-			vport_addr_del(esw, addr);
-			l2addr_hash_del(addr);
-			break;
-		}
-	}
-}
-
-/* Sync vport UC/MC list from vport context */
-static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
-				       u32 vport_num, int list_type)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
-	u8 (*mac_list)[ETH_ALEN];
-	struct l2addr_node *node;
-	struct vport_addr *addr;
-	struct hlist_head *hash;
-	struct hlist_node *tmp;
-	int size;
-	int err;
-	int hi;
-	int i;
-
-	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
-		       MLX5_MAX_MC_PER_VPORT(esw->dev);
-
-	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
-	if (!mac_list)
-		return;
-
-	hash = is_uc ? vport->uc_list : vport->mc_list;
-
-	for_each_l2hash_node(node, tmp, hash, hi) {
-		addr = container_of(node, struct vport_addr, node);
-		addr->action = MLX5_ACTION_DEL;
-	}
-
-	if (!vport->enabled)
-		goto out;
-
-	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
-					    mac_list, &size);
-	if (err)
-		goto out;
-	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
-		  vport_num, is_uc ? "UC" : "MC", size);
-
-	for (i = 0; i < size; i++) {
-		if (is_uc && !is_valid_ether_addr(mac_list[i]))
-			continue;
-
-		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
-			continue;
-
-		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
-		if (addr) {
-			addr->action = MLX5_ACTION_NONE;
-			/* If this mac was previously added because of allmulti
-			 * promiscuous rx mode, it is now converted into an
-			 * original vport mac.
-			 */
-			if (addr->mc_promisc) {
-				struct esw_mc_addr *esw_mc =
-					l2addr_hash_find(esw->mc_table,
-							 mac_list[i],
-							 struct esw_mc_addr);
-				if (!esw_mc) {
-					esw_warn(esw->dev,
-						 "Failed to MAC(%pM) in mcast DB\n",
-						 mac_list[i]);
-					continue;
-				}
-				esw_mc->refcnt++;
-				addr->mc_promisc = false;
-			}
-			continue;
-		}
-
-		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
-				       GFP_KERNEL);
-		if (!addr) {
-			esw_warn(esw->dev,
-				 "Failed to add MAC(%pM) to vport[%d] DB\n",
-				 mac_list[i], vport_num);
-			continue;
-		}
-		addr->vport = vport_num;
-		addr->action = MLX5_ACTION_ADD;
-	}
-out:
-	kfree(mac_list);
-}
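
The function above is a classic mark-and-sweep sync: every cached entry
is first marked DEL, entries still reported by firmware are flipped back
to NONE, new ones are staged as ADD, and esw_apply_vport_addr_list()
later executes whatever actions remain. A self-contained userspace sketch
of the pattern (illustration only, not part of this patch):

	#include <stdio.h>

	enum action { ACT_NONE, ACT_ADD, ACT_DEL };

	struct entry {
		int key;
		enum action act;
		int valid;
	};

	#define CAP 8

	static struct entry cache[CAP] = {
		{ 1, ACT_NONE, 1 }, { 2, ACT_NONE, 1 }, { 3, ACT_NONE, 1 },
	};

	static struct entry *find(int key)
	{
		int i;

		for (i = 0; i < CAP; i++)
			if (cache[i].valid && cache[i].key == key)
				return &cache[i];
		return NULL;
	}

	int main(void)
	{
		int fw_list[] = { 2, 4 };	/* what the device reports now */
		int i, j;

		for (i = 0; i < CAP; i++)	/* 1) mark everything DEL */
			if (cache[i].valid)
				cache[i].act = ACT_DEL;

		for (i = 0; i < 2; i++) {	/* 2) keep survivors, stage adds */
			struct entry *e = find(fw_list[i]);

			if (e) {
				e->act = ACT_NONE;
				continue;
			}
			for (j = 0; j < CAP; j++)
				if (!cache[j].valid) {
					cache[j].key = fw_list[i];
					cache[j].act = ACT_ADD;
					cache[j].valid = 1;
					break;
				}
		}

		for (i = 0; i < CAP; i++)	/* 3) the "apply" pass runs these */
			if (cache[i].valid)
				printf("key %d -> %s\n", cache[i].key,
				       cache[i].act == ACT_ADD ? "ADD" :
				       cache[i].act == ACT_DEL ? "DEL" : "NONE");
		return 0;
	}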
-
-/* Mirror the eswitch mc_table into the vport mc list, for vports in
- * mc promiscuous (allmulti) mode.
- * Must be called after esw_update_vport_addr_list.
- */
-static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	struct l2addr_node *node;
-	struct vport_addr *addr;
-	struct hlist_head *hash;
-	struct hlist_node *tmp;
-	int hi;
-
-	hash = vport->mc_list;
-
-	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
-		u8 *mac = node->addr;
-
-		addr = l2addr_hash_find(hash, mac, struct vport_addr);
-		if (addr) {
-			if (addr->action == MLX5_ACTION_DEL)
-				addr->action = MLX5_ACTION_NONE;
-			continue;
-		}
-		addr = l2addr_hash_add(hash, mac, struct vport_addr,
-				       GFP_KERNEL);
-		if (!addr) {
-			esw_warn(esw->dev,
-				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
-				 mac, vport_num);
-			continue;
-		}
-		addr->vport = vport_num;
-		addr->action = MLX5_ACTION_ADD;
-		addr->mc_promisc = true;
-	}
-}
-
-/* Apply vport rx mode to HW FDB table */
-static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
-				    bool promisc, bool mc_promisc)
-{
-	struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-
-	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
-		goto promisc;
-
-	if (mc_promisc) {
-		vport->allmulti_rule =
-				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
-		if (!allmulti_addr->uplink_rule)
-			allmulti_addr->uplink_rule =
-				esw_fdb_set_vport_allmulti_rule(esw,
-								UPLINK_VPORT);
-		allmulti_addr->refcnt++;
-	} else if (vport->allmulti_rule) {
-		mlx5_del_flow_rules(vport->allmulti_rule);
-		vport->allmulti_rule = NULL;
-
-		if (--allmulti_addr->refcnt > 0)
-			goto promisc;
-
-		if (allmulti_addr->uplink_rule)
-			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
-		allmulti_addr->uplink_rule = NULL;
-	}
-
-promisc:
-	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
-		return;
-
-	if (promisc) {
-		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
-								     vport_num);
-	} else if (vport->promisc_rule) {
-		mlx5_del_flow_rules(vport->promisc_rule);
-		vport->promisc_rule = NULL;
-	}
-}
-
-/* Sync vport rx mode from vport context */
-static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	int promisc_all = 0;
-	int promisc_uc = 0;
-	int promisc_mc = 0;
-	int err;
-
-	err = mlx5_query_nic_vport_promisc(esw->dev,
-					   vport_num,
-					   &promisc_uc,
-					   &promisc_mc,
-					   &promisc_all);
-	if (err)
-		return;
-	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
-		  vport_num, promisc_all, promisc_mc);
-
-	if (!vport->info.trusted || !vport->enabled) {
-		promisc_uc = 0;
-		promisc_mc = 0;
-		promisc_all = 0;
-	}
-
-	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
-				(promisc_all || promisc_mc));
-}
-
-static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
-{
-	struct mlx5_core_dev *dev = vport->dev;
-	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	u8 mac[ETH_ALEN];
-
-	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
-	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
-		  vport->vport, mac);
-
-	if (vport->enabled_events & UC_ADDR_CHANGE) {
-		esw_update_vport_addr_list(esw, vport->vport,
-					   MLX5_NVPRT_LIST_TYPE_UC);
-		esw_apply_vport_addr_list(esw, vport->vport,
-					  MLX5_NVPRT_LIST_TYPE_UC);
-	}
-
-	if (vport->enabled_events & MC_ADDR_CHANGE) {
-		esw_update_vport_addr_list(esw, vport->vport,
-					   MLX5_NVPRT_LIST_TYPE_MC);
-	}
-
-	if (vport->enabled_events & PROMISC_CHANGE) {
-		esw_update_vport_rx_mode(esw, vport->vport);
-		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
-			esw_update_vport_mc_promisc(esw, vport->vport);
-	}
-
-	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
-		esw_apply_vport_addr_list(esw, vport->vport,
-					  MLX5_NVPRT_LIST_TYPE_MC);
-	}
-
-	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
-	if (vport->enabled)
-		arm_vport_context_events_cmd(dev, vport->vport,
-					     vport->enabled_events);
-}
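
Taken together, this handler is the legacy-mode sync loop; roughly (an
editorial summary, not part of the patch):

	/*
	 * FW event: vport context changed
	 *   -> esw_update_vport_addr_list()    stage ADD/DEL/NONE actions
	 *   -> esw_update_vport_rx_mode()      install/remove promisc rules
	 *   -> esw_update_vport_mc_promisc()   mirror mc_table when allmulti
	 *   -> esw_apply_vport_addr_list()     program L2 table + FDB rules
	 *   -> arm_vport_context_events_cmd()  re-arm for the next event
	 */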
-
-static void esw_vport_change_handler(struct work_struct *work)
-{
-	struct mlx5_vport *vport =
-		container_of(work, struct mlx5_vport, vport_change_handler);
-	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
-
-	mutex_lock(&esw->state_lock);
-	esw_vport_change_handle_locked(vport);
-	mutex_unlock(&esw->state_lock);
-}
-
-static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
-				       struct mlx5_vport *vport)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_group *vlan_grp = NULL;
-	struct mlx5_flow_group *drop_grp = NULL;
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *acl;
-	void *match_criteria;
-	u32 *flow_group_in;
-	/* The egress acl table contains 2 rules:
-	 * 1) Allow traffic with vlan_tag=vst_vlan_id
-	 * 2) Drop all other traffic.
-	 */
-	int table_size = 2;
-	int err = 0;
-
-	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
-		return -EOPNOTSUPP;
-
-	if (!IS_ERR_OR_NULL(vport->egress.acl))
-		return 0;
-
-	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
-		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
-		return -EIO;
-	}
-
-	flow_group_in = mlx5_vzalloc(inlen);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR(acl)) {
-		err = PTR_ERR(acl);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(vlan_grp)) {
-		err = PTR_ERR(vlan_grp);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(drop_grp)) {
-		err = PTR_ERR(drop_grp);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	vport->egress.acl = acl;
-	vport->egress.drop_grp = drop_grp;
-	vport->egress.allowed_vlans_grp = vlan_grp;
-out:
-	kvfree(flow_group_in);
-	if (err && !IS_ERR_OR_NULL(vlan_grp))
-		mlx5_destroy_flow_group(vlan_grp);
-	if (err && !IS_ERR_OR_NULL(acl))
-		mlx5_destroy_flow_table(acl);
-	return err;
-}
-
-static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
-					   struct mlx5_vport *vport)
-{
-	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
-		mlx5_del_flow_rules(vport->egress.allowed_vlan);
-
-	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-		mlx5_del_flow_rules(vport->egress.drop_rule);
-
-	vport->egress.allowed_vlan = NULL;
-	vport->egress.drop_rule = NULL;
-}
-
-static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
-					 struct mlx5_vport *vport)
-{
-	if (IS_ERR_OR_NULL(vport->egress.acl))
-		return;
-
-	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
-
-	esw_vport_cleanup_egress_rules(esw, vport);
-	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
-	mlx5_destroy_flow_group(vport->egress.drop_grp);
-	mlx5_destroy_flow_table(vport->egress.acl);
-	vport->egress.allowed_vlans_grp = NULL;
-	vport->egress.drop_grp = NULL;
-	vport->egress.acl = NULL;
-}
-
-static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-					struct mlx5_vport *vport)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *acl;
-	struct mlx5_flow_group *g;
-	void *match_criteria;
-	u32 *flow_group_in;
-	/* The ingress acl table contains 4 groups
-	 * (2 active rules at the same time -
-	 *      1 allow rule from one of the first 3 groups.
-	 *      1 drop rule from the last group):
-	 * 1) Allow untagged traffic with smac=original mac.
-	 * 2) Allow untagged traffic.
-	 * 3) Allow traffic with smac=original mac.
-	 * 4) Drop all other traffic.
-	 */
-	int table_size = 4;
-	int err = 0;
-
-	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-		return -EOPNOTSUPP;
-
-	if (!IS_ERR_OR_NULL(vport->ingress.acl))
-		return 0;
-
-	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
-		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
-		return -EIO;
-	}
-
-	flow_group_in = mlx5_vzalloc(inlen);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR(acl)) {
-		err = PTR_ERR(acl);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.acl = acl;
-
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_untagged_spoofchk_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_untagged_only_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.allow_spoofchk_only_grp = g;
-
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
-
-	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-	vport->ingress.drop_grp = g;
-
-out:
-	if (err) {
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_spoofchk_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
-			mlx5_destroy_flow_group(
-					vport->ingress.allow_untagged_only_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
-			mlx5_destroy_flow_group(
-				vport->ingress.allow_untagged_spoofchk_grp);
-		if (!IS_ERR_OR_NULL(vport->ingress.acl))
-			mlx5_destroy_flow_table(vport->ingress.acl);
-	}
-
-	kvfree(flow_group_in);
-	return err;
-}
-
-static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
-					    struct mlx5_vport *vport)
-{
-	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-		mlx5_del_flow_rules(vport->ingress.drop_rule);
-
-	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
-		mlx5_del_flow_rules(vport->ingress.allow_rule);
-
-	vport->ingress.drop_rule = NULL;
-	vport->ingress.allow_rule = NULL;
-}
-
-static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-					  struct mlx5_vport *vport)
-{
-	if (IS_ERR_OR_NULL(vport->ingress.acl))
-		return;
-
-	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
-
-	esw_vport_cleanup_ingress_rules(esw, vport);
-	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
-	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
-	mlx5_destroy_flow_group(vport->ingress.drop_grp);
-	mlx5_destroy_flow_table(vport->ingress.acl);
-	vport->ingress.acl = NULL;
-	vport->ingress.drop_grp = NULL;
-	vport->ingress.allow_spoofchk_only_grp = NULL;
-	vport->ingress.allow_untagged_only_grp = NULL;
-	vport->ingress.allow_untagged_spoofchk_grp = NULL;
-}
-
-static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
-				    struct mlx5_vport *vport)
-{
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-	u8 *smac_v;
-
-	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
-		mlx5_core_warn(esw->dev,
-			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
-			       vport->vport);
-		return -EPERM;
-	}
-
-	esw_vport_cleanup_ingress_rules(esw, vport);
-
-	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
-		esw_vport_disable_ingress_acl(esw, vport);
-		return 0;
-	}
-
-	err = esw_vport_enable_ingress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable ingress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
-	}
-
-	esw_debug(esw->dev,
-		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->info.vlan, vport->info.qos);
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		err = -ENOMEM;
-		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	if (vport->info.vlan || vport->info.qos)
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
-
-	if (vport->info.spoofchk) {
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
-		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
-		smac_v = MLX5_ADDR_OF(fte_match_param,
-				      spec->match_value,
-				      outer_headers.smac_47_16);
-		ether_addr_copy(smac_v, vport->info.mac);
-	}
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	vport->ingress.allow_rule =
-		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->ingress.allow_rule)) {
-		err = PTR_ERR(vport->ingress.allow_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure ingress allow rule, err(%d)\n",
-			 vport->vport, err);
-		vport->ingress.allow_rule = NULL;
-		goto out;
-	}
-
-	memset(spec, 0, sizeof(*spec));
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-	vport->ingress.drop_rule =
-		mlx5_add_flow_rules(vport->ingress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->ingress.drop_rule)) {
-		err = PTR_ERR(vport->ingress.drop_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure ingress drop rule, err(%d)\n",
-			 vport->vport, err);
-		vport->ingress.drop_rule = NULL;
-		goto out;
-	}
-
-out:
-	if (err)
-		esw_vport_cleanup_ingress_rules(esw, vport);
-	kvfree(spec);
-	return err;
-}
-
-static int esw_vport_egress_config(struct mlx5_eswitch *esw,
-				   struct mlx5_vport *vport)
-{
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	esw_vport_cleanup_egress_rules(esw, vport);
-
-	if (!vport->info.vlan && !vport->info.qos) {
-		esw_vport_disable_egress_acl(esw, vport);
-		return 0;
-	}
-
-	err = esw_vport_enable_egress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable egress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
-	}
-
-	esw_debug(esw->dev,
-		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
-		  vport->vport, vport->info.vlan, vport->info.qos);
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		err = -ENOMEM;
-		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
-			 vport->vport, err);
-		goto out;
-	}
-
-	/* Allowed vlan rule */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	vport->egress.allowed_vlan =
-		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->egress.allowed_vlan)) {
-		err = PTR_ERR(vport->egress.allowed_vlan);
-		esw_warn(esw->dev,
-			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
-			 vport->vport, err);
-		vport->egress.allowed_vlan = NULL;
-		goto out;
-	}
-
-	/* Drop others rule (star rule) */
-	memset(spec, 0, sizeof(*spec));
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
-	vport->egress.drop_rule =
-		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->egress.drop_rule)) {
-		err = PTR_ERR(vport->egress.drop_rule);
-		esw_warn(esw->dev,
-			 "vport[%d] configure egress drop rule failed, err(%d)\n",
-			 vport->vport, err);
-		vport->egress.drop_rule = NULL;
-	}
-out:
-	kvfree(spec);
-	return err;
-}
-
-/* Vport QoS management */
-static int esw_create_tsar(struct mlx5_eswitch *esw)
-{
-	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
-	struct mlx5_core_dev *dev = esw->dev;
-	int err;
-
-	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return 0;
-
-	if (esw->qos.enabled)
-		return -EEXIST;
-
-	err = mlx5_create_scheduling_element_cmd(dev,
-						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &tsar_ctx,
-						 &esw->qos.root_tsar_id);
-	if (err) {
-		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
-		return err;
-	}
-
-	esw->qos.enabled = true;
-	return 0;
-}
-
-static void esw_destroy_tsar(struct mlx5_eswitch *esw)
-{
-	int err;
-
-	if (!esw->qos.enabled)
-		return;
-
-	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-						  SCHEDULING_HIERARCHY_E_SWITCH,
-						  esw->qos.root_tsar_id);
-	if (err)
-		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
-
-	esw->qos.enabled = false;
-}
-
-static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
-				u32 initial_max_rate)
-{
-	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	struct mlx5_core_dev *dev = esw->dev;
-	void *vport_elem;
-	int err = 0;
-
-	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
-	    !MLX5_CAP_QOS(dev, esw_scheduling))
-		return 0;
-
-	if (vport->qos.enabled)
-		return -EEXIST;
-
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
-		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
-				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
-		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
-		 initial_max_rate);
-
-	err = mlx5_create_scheduling_element_cmd(dev,
-						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
-						 &vport->qos.esw_tsar_ix);
-	if (err) {
-		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
-		return err;
-	}
-
-	vport->qos.enabled = true;
-	return 0;
-}
-
-static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	int err = 0;
-
-	if (!vport->qos.enabled)
-		return;
-
-	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
-						  SCHEDULING_HIERARCHY_E_SWITCH,
-						  vport->qos.esw_tsar_ix);
-	if (err)
-		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
-
-	vport->qos.enabled = false;
-}
-
-static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
-				u32 max_rate)
-{
-	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-	struct mlx5_core_dev *dev = esw->dev;
-	void *vport_elem;
-	u32 bitmask = 0;
-	int err = 0;
-
-	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
-		return -EOPNOTSUPP;
-
-	if (!vport->qos.enabled)
-		return -EIO;
-
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
-		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
-				  element_attributes);
-	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
-		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
-		 max_rate);
-	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
-
-	err = mlx5_modify_scheduling_element_cmd(dev,
-						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
-						 vport->qos.esw_tsar_ix,
-						 bitmask);
-	if (err) {
-		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
-			 vport_num, err);
-		return err;
-	}
-
-	return 0;
-}
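
A short summary of the QoS helpers above (editorial note, not part of the
patch):

	/*
	 * One root TSAR is created per eswitch; each enabled vport gets a
	 * child scheduling element whose max_average_bw caps its tx rate.
	 * Updating the cap later only needs a modify command with the
	 * MAX_AVERAGE_BW bit set in the bitmask, as esw_vport_qos_config()
	 * does.
	 */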
-
-static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
-{
-	((u8 *)node_guid)[7] = mac[0];
-	((u8 *)node_guid)[6] = mac[1];
-	((u8 *)node_guid)[5] = mac[2];
-	((u8 *)node_guid)[4] = 0xff;
-	((u8 *)node_guid)[3] = 0xfe;
-	((u8 *)node_guid)[2] = mac[3];
-	((u8 *)node_guid)[1] = mac[4];
-	((u8 *)node_guid)[0] = mac[5];
-}
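
This is the usual MAC to EUI-64 style expansion: ff:fe is inserted between
the OUI and the NIC-specific bytes (note the u/l bit is not flipped here).
A runnable userspace sketch (illustration only; the byte indexes match the
helper above on a little-endian host):

	#include <stdio.h>
	#include <stdint.h>

	static void node_guid_gen_from_mac(uint64_t *node_guid,
					   const uint8_t mac[6])
	{
		((uint8_t *)node_guid)[7] = mac[0];
		((uint8_t *)node_guid)[6] = mac[1];
		((uint8_t *)node_guid)[5] = mac[2];
		((uint8_t *)node_guid)[4] = 0xff;
		((uint8_t *)node_guid)[3] = 0xfe;
		((uint8_t *)node_guid)[2] = mac[3];
		((uint8_t *)node_guid)[1] = mac[4];
		((uint8_t *)node_guid)[0] = mac[5];
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x24, 0x8a, 0x07, 0x11, 0x22, 0x33 };
		uint64_t guid = 0;

		node_guid_gen_from_mac(&guid, mac);
		/* prints 248a07fffe112233 on a little-endian host */
		printf("node_guid = %016llx\n", (unsigned long long)guid);
		return 0;
	}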
-
-static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
-				 struct mlx5_vport *vport)
-{
-	int vport_num = vport->vport;
-
-	if (!vport_num)
-		return;
-
-	mlx5_modify_vport_admin_state(esw->dev,
-				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-				      vport_num,
-				      vport->info.link_state);
-	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
-	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
-	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
-			       (vport->info.vlan || vport->info.qos));
-
-	/* Only legacy mode needs ACLs */
-	if (esw->mode == SRIOV_LEGACY) {
-		esw_vport_ingress_config(esw, vport);
-		esw_vport_egress_config(esw, vport);
-	}
-}
-
-static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
-			     int enable_events)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-
-	mutex_lock(&esw->state_lock);
-	WARN_ON(vport->enabled);
-
-	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
-
-	/* Restore old vport configuration */
-	esw_apply_vport_conf(esw, vport);
-
-	/* Attach vport to the eswitch rate limiter */
-	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
-		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
-
-	/* Sync with current vport context */
-	vport->enabled_events = enable_events;
-	vport->enabled = true;
-
-	/* only PF is trusted by default */
-	if (!vport_num)
-		vport->info.trusted = true;
-
-	esw_vport_change_handle_locked(vport);
-
-	esw->enabled_vports++;
-	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-	mutex_unlock(&esw->state_lock);
-}
-
-static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
-{
-	struct mlx5_vport *vport = &esw->vports[vport_num];
-
-	if (!vport->enabled)
-		return;
-
-	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
-	/* Mark this vport as disabled to discard new events */
-	vport->enabled = false;
-
-	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
-	/* Wait for current already scheduled events to complete */
-	flush_workqueue(esw->work_queue);
-	/* Disable events from this vport */
-	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
-	mutex_lock(&esw->state_lock);
-	/* We don't assume VFs will clean up after themselves.
-	 * Calling the vport change handler while the vport is disabled will
-	 * clean up the vport resources.
-	 */
-	esw_vport_change_handle_locked(vport);
-	vport->enabled_events = 0;
-	esw_vport_disable_qos(esw, vport_num);
-	if (vport_num && esw->mode == SRIOV_LEGACY) {
-		mlx5_modify_vport_admin_state(esw->dev,
-					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-					      vport_num,
-					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
-		esw_vport_disable_egress_acl(esw, vport);
-		esw_vport_disable_ingress_acl(esw, vport);
-	}
-	esw->enabled_vports--;
-	mutex_unlock(&esw->state_lock);
-}
-
-/* Public E-Switch API */
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
-{
-	int err;
-	int i, enabled_events;
-
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return 0;
-
-	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
-	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
-		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
-		return -EOPNOTSUPP;
-	}
-
-	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
-
-	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
-		esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
-
-	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
-	esw->mode = mode;
-	esw_disable_vport(esw, 0);
-
-	if (mode == SRIOV_LEGACY)
-		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
-	else
-		err = esw_offloads_init(esw, nvfs + 1);
-	if (err)
-		goto abort;
-
-	err = esw_create_tsar(esw);
-	if (err)
-		esw_warn(esw->dev, "Failed to create eswitch TSAR");
-
-	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
-	for (i = 0; i <= nvfs; i++)
-		esw_enable_vport(esw, i, enabled_events);
-
-	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
-		 esw->enabled_vports);
-	return 0;
-
-abort:
-	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
-	esw->mode = SRIOV_NONE;
-	return err;
-}
-
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
-{
-	struct esw_mc_addr *mc_promisc;
-	int nvports;
-	int i;
-
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return;
-
-	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
-		 esw->enabled_vports, esw->mode);
-
-	mc_promisc = esw->mc_promisc;
-	nvports = esw->enabled_vports;
-
-	for (i = 0; i < esw->total_vports; i++)
-		esw_disable_vport(esw, i);
-
-	if (mc_promisc && mc_promisc->uplink_rule)
-		mlx5_del_flow_rules(mc_promisc->uplink_rule);
-
-	esw_destroy_tsar(esw);
-
-	if (esw->mode == SRIOV_LEGACY)
-		esw_destroy_legacy_fdb_table(esw);
-	else if (esw->mode == SRIOV_OFFLOADS)
-		esw_offloads_cleanup(esw, nvports);
-
-	esw->mode = SRIOV_NONE;
-	/* VPORT 0 (PF) must be re-enabled with the non-SRIOV configuration */
-	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
-}
-
-void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
-{
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return;
-
-	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
-	/* VF Vports will be enabled when SRIOV is enabled */
-}
-
-void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
-{
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return;
-
-	esw_disable_vport(esw, 0);
-}
-
-int mlx5_eswitch_init(struct mlx5_core_dev *dev)
-{
-	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
-	int total_vports = MLX5_TOTAL_VPORTS(dev);
-	struct esw_mc_addr *mc_promisc;
-	struct mlx5_eswitch *esw;
-	int vport_num;
-	int err;
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return 0;
-
-	esw_info(dev,
-		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
-		 total_vports, l2_table_size,
-		 MLX5_MAX_UC_PER_VPORT(dev),
-		 MLX5_MAX_MC_PER_VPORT(dev));
-
-	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
-	if (!esw)
-		return -ENOMEM;
-
-	esw->dev = dev;
-
-	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
-				       sizeof(unsigned long), GFP_KERNEL);
-	if (!esw->l2_table.bitmap) {
-		err = -ENOMEM;
-		goto abort;
-	}
-	esw->l2_table.size = l2_table_size;
-
-	mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
-	if (!mc_promisc) {
-		err = -ENOMEM;
-		goto abort;
-	}
-	esw->mc_promisc = mc_promisc;
-
-	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
-	if (!esw->work_queue) {
-		err = -ENOMEM;
-		goto abort;
-	}
-
-	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
-			      GFP_KERNEL);
-	if (!esw->vports) {
-		err = -ENOMEM;
-		goto abort;
-	}
-
-	esw->offloads.vport_reps =
-		kcalloc(total_vports, sizeof(struct mlx5_eswitch_rep),
-			GFP_KERNEL);
-	if (!esw->offloads.vport_reps) {
-		err = -ENOMEM;
-		goto abort;
-	}
-
-	hash_init(esw->offloads.encap_tbl);
-	mutex_init(&esw->state_lock);
-
-	for (vport_num = 0; vport_num < total_vports; vport_num++) {
-		struct mlx5_vport *vport = &esw->vports[vport_num];
-
-		vport->vport = vport_num;
-		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
-		vport->dev = dev;
-		INIT_WORK(&vport->vport_change_handler,
-			  esw_vport_change_handler);
-	}
-
-	esw->total_vports = total_vports;
-	esw->enabled_vports = 0;
-	esw->mode = SRIOV_NONE;
-	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
-
-	dev->priv.eswitch = esw;
-	return 0;
-abort:
-	if (esw->work_queue)
-		destroy_workqueue(esw->work_queue);
-	kfree(esw->l2_table.bitmap);
-	kfree(esw->vports);
-	kfree(esw->offloads.vport_reps);
-	kfree(esw);
-	return err;
-}
-
-void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
-{
-	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
-	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return;
-
-	esw_info(esw->dev, "cleanup\n");
-
-	esw->dev->priv.eswitch = NULL;
-	destroy_workqueue(esw->work_queue);
-	kfree(esw->l2_table.bitmap);
-	kfree(esw->mc_promisc);
-	kfree(esw->offloads.vport_reps);
-	kfree(esw->vports);
-	kfree(esw);
-}
-
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
-{
-	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
-	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
-	struct mlx5_vport *vport;
-
-	if (!esw) {
-		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
-			vport_num);
-		return;
-	}
-
-	vport = &esw->vports[vport_num];
-	if (vport->enabled)
-		queue_work(esw->work_queue, &vport->vport_change_handler);
-}
-
-/* Vport Administration */
-#define ESW_ALLOWED(esw) \
-	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
-#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
-
-int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
-			       int vport, u8 mac[ETH_ALEN])
-{
-	struct mlx5_vport *evport;
-	u64 node_guid;
-	int err = 0;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-
-	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
-		mlx5_core_warn(esw->dev,
-			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
-			       vport);
-		err = -EPERM;
-		goto unlock;
-	}
-
-	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
-			       vport, err);
-		goto unlock;
-	}
-
-	node_guid_gen_from_mac(&node_guid, mac);
-	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
-	if (err)
-		mlx5_core_warn(esw->dev,
-			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
-			       vport, err);
-
-	ether_addr_copy(evport->info.mac, mac);
-	evport->info.node_guid = node_guid;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY)
-		err = esw_vport_ingress_config(esw, evport);
-
-unlock:
-	mutex_unlock(&esw->state_lock);
-	return err;
-}
-
-int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
-				 int vport, int link_state)
-{
-	struct mlx5_vport *evport;
-	int err = 0;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-
-	err = mlx5_modify_vport_admin_state(esw->dev,
-					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
-					    vport, link_state);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "Failed to set vport %d link state, err = %d",
-			       vport, err);
-		goto unlock;
-	}
-
-	evport->info.link_state = link_state;
-
-unlock:
-	mutex_unlock(&esw->state_lock);
-	return err;
-}
-
-int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
-				  int vport, struct ifla_vf_info *ivi)
-{
-	struct mlx5_vport *evport;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	evport = &esw->vports[vport];
-
-	memset(ivi, 0, sizeof(*ivi));
-	ivi->vf = vport - 1;
-
-	mutex_lock(&esw->state_lock);
-	ether_addr_copy(ivi->mac, evport->info.mac);
-	ivi->linkstate = evport->info.link_state;
-	ivi->vlan = evport->info.vlan;
-	ivi->qos = evport->info.qos;
-	ivi->spoofchk = evport->info.spoofchk;
-	ivi->trusted = evport->info.trusted;
-	ivi->max_tx_rate = evport->info.max_rate;
-	mutex_unlock(&esw->state_lock);
-
-	return 0;
-}
-
-int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				  int vport, u16 vlan, u8 qos, u8 set_flags)
-{
-	struct mlx5_vport *evport;
-	int err = 0;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-
-	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
-	if (err)
-		goto unlock;
-
-	evport->info.vlan = vlan;
-	evport->info.qos = qos;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
-		err = esw_vport_ingress_config(esw, evport);
-		if (err)
-			goto unlock;
-		err = esw_vport_egress_config(esw, evport);
-	}
-
-unlock:
-	mutex_unlock(&esw->state_lock);
-	return err;
-}
-
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				int vport, u16 vlan, u8 qos)
-{
-	u8 set_flags = 0;
-
-	if (vlan || qos)
-		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
-
-	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
-}
-
-int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
-				    int vport, bool spoofchk)
-{
-	struct mlx5_vport *evport;
-	bool pschk;
-	int err = 0;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-	pschk = evport->info.spoofchk;
-	evport->info.spoofchk = spoofchk;
-	if (evport->enabled && esw->mode == SRIOV_LEGACY)
-		err = esw_vport_ingress_config(esw, evport);
-	if (err)
-		evport->info.spoofchk = pschk;
-	mutex_unlock(&esw->state_lock);
-
-	return err;
-}
-
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
-				 int vport, bool setting)
-{
-	struct mlx5_vport *evport;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-	evport->info.trusted = setting;
-	if (evport->enabled)
-		esw_vport_change_handle_locked(evport);
-	mutex_unlock(&esw->state_lock);
-
-	return 0;
-}
-
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
-				int vport, u32 max_rate)
-{
-	struct mlx5_vport *evport;
-	int err = 0;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	mutex_lock(&esw->state_lock);
-	evport = &esw->vports[vport];
-	err = esw_vport_qos_config(esw, vport, max_rate);
-	if (!err)
-		evport->info.max_rate = max_rate;
-
-	mutex_unlock(&esw->state_lock);
-	return err;
-}
-
-int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
-				 int vport,
-				 struct ifla_vf_stats *vf_stats)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
-	int err = 0;
-	u32 *out;
-
-	if (!ESW_ALLOWED(esw))
-		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
-		return -EINVAL;
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_vport_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
-	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
-	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
-
-	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
-	if (err)
-		goto free_out;
-
-	#define MLX5_GET_CTR(p, x) \
-		MLX5_GET64(query_vport_counter_out, p, x)
-
-	memset(vf_stats, 0, sizeof(*vf_stats));
-	vf_stats->rx_packets =
-		MLX5_GET_CTR(out, received_eth_unicast.packets) +
-		MLX5_GET_CTR(out, received_eth_multicast.packets) +
-		MLX5_GET_CTR(out, received_eth_broadcast.packets);
-
-	vf_stats->rx_bytes =
-		MLX5_GET_CTR(out, received_eth_unicast.octets) +
-		MLX5_GET_CTR(out, received_eth_multicast.octets) +
-		MLX5_GET_CTR(out, received_eth_broadcast.octets);
-
-	vf_stats->tx_packets =
-		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
-		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
-		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
-
-	vf_stats->tx_bytes =
-		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
-		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
-		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
-
-	vf_stats->multicast =
-		MLX5_GET_CTR(out, received_eth_multicast.packets);
-
-	vf_stats->broadcast =
-		MLX5_GET_CTR(out, received_eth_broadcast.packets);
-
-free_out:
-	kvfree(out);
-	return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
deleted file mode 100644
index 8661dd3..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_ESWITCH_H__
-#define __MLX5_ESWITCH_H__
-
-#include <linux/if_ether.h>
-#include <linux/if_link.h>
-#include <net/devlink.h>
-#include <linux/mlx5/device.h>
-
-#define MLX5_MAX_UC_PER_VPORT(dev) \
-	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
-
-#define MLX5_MAX_MC_PER_VPORT(dev) \
-	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
-
-#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
-#define MLX5_L2_ADDR_HASH(addr) (addr[5])
-
-#define FDB_UPLINK_VPORT 0xffff
-
-/* L2 (MAC address based) hash helpers */
-struct l2addr_node {
-	struct hlist_node hlist;
-	u8                addr[ETH_ALEN];
-};
-
-#define for_each_l2hash_node(hn, tmp, hash, i) \
-	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
-		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-
-#define l2addr_hash_find(hash, mac, type) ({                \
-	int ix = MLX5_L2_ADDR_HASH(mac);                    \
-	bool found = false;                                 \
-	type *ptr = NULL;                                   \
-							    \
-	hlist_for_each_entry(ptr, &hash[ix], node.hlist)    \
-		if (ether_addr_equal(ptr->node.addr, mac)) {\
-			found = true;                       \
-			break;                              \
-		}                                           \
-	if (!found)                                         \
-		ptr = NULL;                                 \
-	ptr;                                                \
-})
-
-#define l2addr_hash_add(hash, mac, type, gfp) ({            \
-	int ix = MLX5_L2_ADDR_HASH(mac);                    \
-	type *ptr = NULL;                                   \
-							    \
-	ptr = kzalloc(sizeof(type), gfp);                   \
-	if (ptr) {                                          \
-		ether_addr_copy(ptr->node.addr, mac);       \
-		hlist_add_head(&ptr->node.hlist, &hash[ix]);\
-	}                                                   \
-	ptr;                                                \
-})
-
-#define l2addr_hash_del(ptr) ({                             \
-	hlist_del(&ptr->node.hlist);                        \
-	kfree(ptr);                                         \
-})
-
-struct vport_ingress {
-	struct mlx5_flow_table *acl;
-	struct mlx5_flow_group *allow_untagged_spoofchk_grp;
-	struct mlx5_flow_group *allow_spoofchk_only_grp;
-	struct mlx5_flow_group *allow_untagged_only_grp;
-	struct mlx5_flow_group *drop_grp;
-	struct mlx5_flow_handle  *allow_rule;
-	struct mlx5_flow_handle  *drop_rule;
-};
-
-struct vport_egress {
-	struct mlx5_flow_table *acl;
-	struct mlx5_flow_group *allowed_vlans_grp;
-	struct mlx5_flow_group *drop_grp;
-	struct mlx5_flow_handle  *allowed_vlan;
-	struct mlx5_flow_handle  *drop_rule;
-};
-
-struct mlx5_vport_info {
-	u8                      mac[ETH_ALEN];
-	u16                     vlan;
-	u8                      qos;
-	u64                     node_guid;
-	int                     link_state;
-	u32                     max_rate;
-	bool                    spoofchk;
-	bool                    trusted;
-};
-
-struct mlx5_vport {
-	struct mlx5_core_dev    *dev;
-	int                     vport;
-	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
-	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
-	struct mlx5_flow_handle *promisc_rule;
-	struct mlx5_flow_handle *allmulti_rule;
-	struct work_struct      vport_change_handler;
-
-	struct vport_ingress    ingress;
-	struct vport_egress     egress;
-
-	struct mlx5_vport_info  info;
-
-	struct {
-		bool            enabled;
-		u32             esw_tsar_ix;
-	} qos;
-
-	bool                    enabled;
-	u16                     enabled_events;
-};
-
-struct mlx5_l2_table {
-	struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE];
-	u32                  size;
-	unsigned long        *bitmap;
-};
-
-struct mlx5_eswitch_fdb {
-	void *fdb;
-	union {
-		struct legacy_fdb {
-			struct mlx5_flow_group *addr_grp;
-			struct mlx5_flow_group *allmulti_grp;
-			struct mlx5_flow_group *promisc_grp;
-		} legacy;
-
-		struct offloads_fdb {
-			struct mlx5_flow_table *fdb;
-			struct mlx5_flow_group *send_to_vport_grp;
-			struct mlx5_flow_group *miss_grp;
-			struct mlx5_flow_handle *miss_rule;
-			int vlan_push_pop_refcount;
-		} offloads;
-	};
-};
-
-enum {
-	SRIOV_NONE,
-	SRIOV_LEGACY,
-	SRIOV_OFFLOADS
-};
-
-struct mlx5_esw_sq {
-	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct list_head	 list;
-};
-
-struct mlx5_eswitch_rep {
-	int		       (*load)(struct mlx5_eswitch *esw,
-				       struct mlx5_eswitch_rep *rep);
-	void		       (*unload)(struct mlx5_eswitch *esw,
-					 struct mlx5_eswitch_rep *rep);
-	u16		       vport;
-	u8		       hw_id[ETH_ALEN];
-	struct net_device      *netdev;
-
-	struct mlx5_flow_handle *vport_rx_rule;
-	struct list_head       vport_sqs_list;
-	u16		       vlan;
-	u32		       vlan_refcount;
-	bool		       valid;
-};
-
-struct mlx5_esw_offload {
-	struct mlx5_flow_table *ft_offloads;
-	struct mlx5_flow_group *vport_rx_group;
-	struct mlx5_eswitch_rep *vport_reps;
-	DECLARE_HASHTABLE(encap_tbl, 8);
-	u8 inline_mode;
-};
-
-struct mlx5_eswitch {
-	struct mlx5_core_dev    *dev;
-	struct mlx5_l2_table    l2_table;
-	struct mlx5_eswitch_fdb fdb_table;
-	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
-	struct workqueue_struct *work_queue;
-	struct mlx5_vport       *vports;
-	int                     total_vports;
-	int                     enabled_vports;
-	/* Synchronize between vport change events
-	 * and async SRIOV admin state changes
-	 */
-	struct mutex            state_lock;
-	struct esw_mc_addr      *mc_promisc;
-
-	struct {
-		bool            enabled;
-		u32             root_tsar_id;
-	} qos;
-
-	struct mlx5_esw_offload offloads;
-	int                     mode;
-};
-
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
-
-/* E-Switch API */
-int mlx5_eswitch_init(struct mlx5_core_dev *dev);
-void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
-void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
-void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
-void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
-void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
-int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
-			       int vport, u8 mac[ETH_ALEN]);
-int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
-				 int vport, int link_state);
-int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				int vport, u16 vlan, u8 qos);
-int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
-				    int vport, bool spoofchk);
-int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
-				 int vport_num, bool setting);
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
-				int vport, u32 max_rate);
-int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
-				  int vport, struct ifla_vf_info *ivi);
-int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
-				 int vport,
-				 struct ifla_vf_stats *vf_stats);
-
-struct mlx5_flow_spec;
-struct mlx5_esw_flow_attr;
-
-struct mlx5_flow_handle *
-mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
-				struct mlx5_flow_spec *spec,
-				struct mlx5_esw_flow_attr *attr);
-struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
-
-enum {
-	SET_VLAN_STRIP	= BIT(0),
-	SET_VLAN_INSERT	= BIT(1)
-};
-
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
-#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
-
-struct mlx5_encap_info {
-	__be32 daddr;
-	__be32 tun_id;
-	__be16 tp_dst;
-};
-
-struct mlx5_encap_entry {
-	struct hlist_node encap_hlist;
-	struct list_head flows;
-	u32 encap_id;
-	struct neighbour *n;
-	struct mlx5_encap_info tun_info;
-	unsigned char h_dest[ETH_ALEN];	/* destination eth addr	*/
-
-	struct net_device *out_dev;
-	int tunnel_type;
-};
-
-struct mlx5_esw_flow_attr {
-	struct mlx5_eswitch_rep *in_rep;
-	struct mlx5_eswitch_rep *out_rep;
-
-	int	action;
-	u16	vlan;
-	bool	vlan_handled;
-	struct mlx5_encap_entry *encap;
-};
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep);
-
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
-int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
-int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
-				     int vport_index,
-				     struct mlx5_eswitch_rep *rep);
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
-				 struct mlx5_esw_flow_attr *attr);
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
-				 struct mlx5_esw_flow_attr *attr);
-int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
-				  int vport, u16 vlan, u8 qos, u8 set_flags);
-
-#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-
-#define esw_info(dev, format, ...)				\
-	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_warn(dev, format, ...)				\
-	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_debug(dev, format, ...)				\
-	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
-#endif /* __MLX5_ESWITCH_H__ */
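
A note on the l2addr helpers removed above: they are type-generic macros.
Any struct that embeds a struct l2addr_node member literally named "node"
can use them, and the 256-bucket table is keyed on the last byte of the MAC
address. A minimal usage sketch (struct vport_addr_example is hypothetical):

	/* Hypothetical user type; only the embedded "node" member matters. */
	struct vport_addr_example {
		struct l2addr_node node;
		u32                flags;
	};

	static struct hlist_head uc_hash_example[MLX5_L2_ADDR_HASH_SIZE];

	static void example_add_mac(const u8 *mac)
	{
		struct vport_addr_example *addr;

		/* lookup first; the find macro yields NULL on a miss */
		addr = l2addr_hash_find(uc_hash_example, mac,
					struct vport_addr_example);
		if (addr)
			return;		/* already tracked */

		addr = l2addr_hash_add(uc_hash_example, mac,
				       struct vport_addr_example, GFP_KERNEL);
		if (!addr)
			return;		/* allocation failure path elided */
		addr->flags = 0;
	}
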
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
deleted file mode 100644
index 466e161..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ /dev/null
@@ -1,1002 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/etherdevice.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#include <linux/mlx5/vport.h>
-#include <linux/mlx5/fs.h>
-#include "mlx5_core.h"
-#include "eswitch.h"
-
-enum {
-	FDB_FAST_PATH = 0,
-	FDB_SLOW_PATH
-};
-
-struct mlx5_flow_handle *
-mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
-				struct mlx5_flow_spec *spec,
-				struct mlx5_esw_flow_attr *attr)
-{
-	struct mlx5_flow_destination dest[2] = {};
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_flow_handle *rule;
-	void *misc;
-	int i = 0;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return ERR_PTR(-EOPNOTSUPP);
-
-	/* per-flow vlan pop/push is emulated, don't pass it to the firmware */
-	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
-
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest[i].vport_num = attr->out_rep->vport;
-		i++;
-	}
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		counter = mlx5_fc_create(esw->dev, true);
-		if (IS_ERR(counter))
-			return ERR_CAST(counter);
-		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-		dest[i].counter = counter;
-		i++;
-	}
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-				      MLX5_MATCH_MISC_PARAMETERS;
-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
-		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
-
-	if (attr->encap)
-		flow_act.encap_id = attr->encap->encap_id;
-
-	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
-				   spec, &flow_act, dest, i);
-	if (IS_ERR(rule))
-		mlx5_fc_destroy(esw->dev, counter);
-
-	return rule;
-}
-
-static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
-{
-	struct mlx5_eswitch_rep *rep;
-	int vf_vport, err = 0;
-
-	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
-	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
-		rep = &esw->offloads.vport_reps[vf_vport];
-		if (!rep->valid)
-			continue;
-
-		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
-		if (err)
-			goto out;
-	}
-
-out:
-	return err;
-}
-
-static struct mlx5_eswitch_rep *
-esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
-{
-	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
-
-	in_rep  = attr->in_rep;
-	out_rep = attr->out_rep;
-
-	if (push)
-		vport = in_rep;
-	else if (pop)
-		vport = out_rep;
-	else
-		vport = in_rep;
-
-	return vport;
-}
-
-static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
-				     bool push, bool pop, bool fwd)
-{
-	struct mlx5_eswitch_rep *in_rep, *out_rep;
-
-	if ((push || pop) && !fwd)
-		goto out_notsupp;
-
-	in_rep  = attr->in_rep;
-	out_rep = attr->out_rep;
-
-	if (push && in_rep->vport == FDB_UPLINK_VPORT)
-		goto out_notsupp;
-
-	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
-		goto out_notsupp;
-
-	/* vport has vlan push configured, can't offload VF --> wire rules without it */
-	if (!push && !pop && fwd)
-		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
-			goto out_notsupp;
-
-	/* protects against (1) setting rules with different vlans to push and
-	 * (2) setting rules without vlans (attr->vlan = 0) and with vlans to push (!= 0)
-	 */
-	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
-		goto out_notsupp;
-
-	return 0;
-
-out_notsupp:
-	return -EOPNOTSUPP;
-}
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
-				 struct mlx5_esw_flow_attr *attr)
-{
-	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
-	struct mlx5_eswitch_rep *vport = NULL;
-	bool push, pop, fwd;
-	int err = 0;
-
-	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
-	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
-	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
-	err = esw_add_vlan_action_check(attr, push, pop, fwd);
-	if (err)
-		return err;
-
-	attr->vlan_handled = false;
-
-	vport = esw_vlan_action_get_vport(attr, push, pop);
-
-	if (!push && !pop && fwd) {
-		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
-			vport->vlan_refcount++;
-			attr->vlan_handled = true;
-		}
-
-		return 0;
-	}
-
-	if (!push && !pop)
-		return 0;
-
-	if (!offloads->vlan_push_pop_refcount) {
-		/* it's the 1st vlan rule, apply global vlan pop policy */
-		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
-		if (err)
-			goto out;
-	}
-	offloads->vlan_push_pop_refcount++;
-
-	if (push) {
-		if (vport->vlan_refcount)
-			goto skip_set_push;
-
-		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
-						    SET_VLAN_INSERT | SET_VLAN_STRIP);
-		if (err)
-			goto out;
-		vport->vlan = attr->vlan;
-skip_set_push:
-		vport->vlan_refcount++;
-	}
-out:
-	if (!err)
-		attr->vlan_handled = true;
-	return err;
-}
-
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
-				 struct mlx5_esw_flow_attr *attr)
-{
-	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
-	struct mlx5_eswitch_rep *vport = NULL;
-	bool push, pop, fwd;
-	int err = 0;
-
-	if (!attr->vlan_handled)
-		return 0;
-
-	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
-	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
-	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
-	vport = esw_vlan_action_get_vport(attr, push, pop);
-
-	if (!push && !pop && fwd) {
-		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
-			vport->vlan_refcount--;
-
-		return 0;
-	}
-
-	if (push) {
-		vport->vlan_refcount--;
-		if (vport->vlan_refcount)
-			goto skip_unset_push;
-
-		vport->vlan = 0;
-		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
-						    0, 0, SET_VLAN_STRIP);
-		if (err)
-			goto out;
-	}
-
-skip_unset_push:
-	offloads->vlan_push_pop_refcount--;
-	if (offloads->vlan_push_pop_refcount)
-		return 0;
-
-	/* no more vlan rules, stop global vlan pop policy */
-	err = esw_set_global_vlan_pop(esw, 0);
-
-out:
-	return err;
-}
-
-static struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
-{
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_flow_spec *spec;
-	void *misc;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
-		flow_rule = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
-	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
-	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
-	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-	dest.vport_num = vport;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
-					&flow_act, &dest, 1);
-	if (IS_ERR(flow_rule))
-		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
-out:
-	kvfree(spec);
-	return flow_rule;
-}
-
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep)
-{
-	struct mlx5_esw_sq *esw_sq, *tmp;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return;
-
-	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-		list_del(&esw_sq->list);
-		kfree(esw_sq);
-	}
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num)
-{
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_esw_sq *esw_sq;
-	int err;
-	int i;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return 0;
-
-	for (i = 0; i < sqns_num; i++) {
-		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-		if (!esw_sq) {
-			err = -ENOMEM;
-			goto out_err;
-		}
-
-		/* Add re-inject rule to the PF/representor sqs */
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								rep->vport,
-								sqns_array[i]);
-		if (IS_ERR(flow_rule)) {
-			err = PTR_ERR(flow_rule);
-			kfree(esw_sq);
-			goto out_err;
-		}
-		esw_sq->send_to_vport_rule = flow_rule;
-		list_add(&esw_sq->list, &rep->vport_sqs_list);
-	}
-	return 0;
-
-out_err:
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-	return err;
-}
-
-static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
-{
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_handle *flow_rule = NULL;
-	struct mlx5_flow_spec *spec;
-	int err = 0;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-	dest.vport_num = 0;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-
-	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
-					&flow_act, &dest, 1);
-	if (IS_ERR(flow_rule)) {
-		err = PTR_ERR(flow_rule);
-		esw_warn(esw->dev,  "FDB: Failed to add miss flow rule err %d\n", err);
-		goto out;
-	}
-
-	esw->fdb_table.offloads.miss_rule = flow_rule;
-out:
-	kvfree(spec);
-	return err;
-}
-
-#define MAX_PF_SQ 256
-#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
-#define ESW_OFFLOADS_NUM_GROUPS  4
-
-static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_core_dev *dev = esw->dev;
-	struct mlx5_flow_namespace *root_ns;
-	struct mlx5_flow_table *fdb = NULL;
-	struct mlx5_flow_group *g;
-	u32 *flow_group_in;
-	void *match_criteria;
-	int table_size, ix, err = 0;
-	u32 flags = 0;
-
-	flow_group_in = mlx5_vzalloc(inlen);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
-	if (!root_ns) {
-		esw_warn(dev, "Failed to get FDB flow namespace\n");
-		err = -EOPNOTSUPP;
-		goto ns_err;
-	}
-
-	esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
-		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
-		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
-
-	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
-						  ESW_OFFLOADS_NUM_ENTRIES,
-						  ESW_OFFLOADS_NUM_GROUPS, 0,
-						  flags);
-	if (IS_ERR(fdb)) {
-		err = PTR_ERR(fdb);
-		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
-		goto fast_fdb_err;
-	}
-	esw->fdb_table.fdb = fdb;
-
-	table_size = nvports + MAX_PF_SQ + 2;
-	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
-	if (IS_ERR(fdb)) {
-		err = PTR_ERR(fdb);
-		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
-		goto slow_fdb_err;
-	}
-	esw->fdb_table.offloads.fdb = fdb;
-
-	/* create send-to-vport group */
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_MISC_PARAMETERS);
-
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
-	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
-
-	ix = nvports + MAX_PF_SQ;
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
-
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
-		goto send_vport_err;
-	}
-	esw->fdb_table.offloads.send_to_vport_grp = g;
-
-	/* create miss group */
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
-
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
-
-	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
-		goto miss_err;
-	}
-	esw->fdb_table.offloads.miss_grp = g;
-
-	err = esw_add_fdb_miss_rule(esw);
-	if (err)
-		goto miss_rule_err;
-
-	return 0;
-
-miss_rule_err:
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
-miss_err:
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
-send_vport_err:
-	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
-slow_fdb_err:
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
-fast_fdb_err:
-ns_err:
-	kvfree(flow_group_in);
-	return err;
-}
-
-static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
-{
-	if (!esw->fdb_table.fdb)
-		return;
-
-	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
-	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
-	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
-
-	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
-}
-
-static int esw_create_offloads_table(struct mlx5_eswitch *esw)
-{
-	struct mlx5_flow_namespace *ns;
-	struct mlx5_flow_table *ft_offloads;
-	struct mlx5_core_dev *dev = esw->dev;
-	int err = 0;
-
-	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
-	if (!ns) {
-		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
-		return -EOPNOTSUPP;
-	}
-
-	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
-	if (IS_ERR(ft_offloads)) {
-		err = PTR_ERR(ft_offloads);
-		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
-		return err;
-	}
-
-	esw->offloads.ft_offloads = ft_offloads;
-	return 0;
-}
-
-static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
-{
-	struct mlx5_esw_offload *offloads = &esw->offloads;
-
-	mlx5_destroy_flow_table(offloads->ft_offloads);
-}
-
-static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_group *g;
-	struct mlx5_priv *priv = &esw->dev->priv;
-	u32 *flow_group_in;
-	void *match_criteria, *misc;
-	int err = 0;
-	int nvports = priv->sriov.num_vfs + 2;
-
-	flow_group_in = mlx5_vzalloc(inlen);
-	if (!flow_group_in)
-		return -ENOMEM;
-
-	/* create vport rx group */
-	memset(flow_group_in, 0, inlen);
-	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
-		 MLX5_MATCH_MISC_PARAMETERS);
-
-	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
-	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
-	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
-
-	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
-
-	if (IS_ERR(g)) {
-		err = PTR_ERR(g);
-		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
-		goto out;
-	}
-
-	esw->offloads.vport_rx_group = g;
-out:
-	kvfree(flow_group_in);
-	return err;
-}
-
-static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
-{
-	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
-}
-
-struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
-{
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_flow_spec *spec;
-	void *misc;
-
-	spec = mlx5_vzalloc(sizeof(*spec));
-	if (!spec) {
-		esw_warn(esw->dev, "Failed to alloc match parameters\n");
-		flow_rule = ERR_PTR(-ENOMEM);
-		goto out;
-	}
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
-	MLX5_SET(fte_match_set_misc, misc, source_port, vport);
-
-	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
-	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
-
-	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-	dest.tir_num = tirn;
-
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
-				       &flow_act, &dest, 1);
-	if (IS_ERR(flow_rule)) {
-		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
-		goto out;
-	}
-
-out:
-	kvfree(spec);
-	return flow_rule;
-}
-
-static int esw_offloads_start(struct mlx5_eswitch *esw)
-{
-	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
-
-	if (esw->mode != SRIOV_LEGACY) {
-		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
-		return -EINVAL;
-	}
-
-	mlx5_eswitch_disable_sriov(esw);
-	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
-		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
-	}
-	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
-		if (mlx5_eswitch_inline_mode_get(esw,
-						 num_vfs,
-						 &esw->offloads.inline_mode)) {
-			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
-			esw_warn(esw->dev, "Inline mode is different between vports\n");
-		}
-	}
-	return err;
-}
-
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
-{
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-	int err;
-
-	err = esw_create_offloads_fdb_table(esw, nvports);
-	if (err)
-		return err;
-
-	err = esw_create_offloads_table(esw);
-	if (err)
-		goto create_ft_err;
-
-	err = esw_create_vport_rx_group(esw);
-	if (err)
-		goto create_fg_err;
-
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-
-		err = rep->load(esw, rep);
-		if (err)
-			goto err_reps;
-	}
-	return 0;
-
-err_reps:
-	for (vport--; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-	esw_destroy_vport_rx_group(esw);
-
-create_fg_err:
-	esw_destroy_offloads_table(esw);
-
-create_ft_err:
-	esw_destroy_offloads_fdb_table(esw);
-	return err;
-}
-
-static int esw_offloads_stop(struct mlx5_eswitch *esw)
-{
-	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
-
-	mlx5_eswitch_disable_sriov(esw);
-	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
-	if (err) {
-		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
-		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
-		if (err1)
-			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
-	}
-
-	return err;
-}
-
-void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
-{
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-
-	esw_destroy_vport_rx_group(esw);
-	esw_destroy_offloads_table(esw);
-	esw_destroy_offloads_fdb_table(esw);
-}
-
-static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
-{
-	switch (mode) {
-	case DEVLINK_ESWITCH_MODE_LEGACY:
-		*mlx5_mode = SRIOV_LEGACY;
-		break;
-	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
-		*mlx5_mode = SRIOV_OFFLOADS;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
-{
-	switch (mlx5_mode) {
-	case SRIOV_LEGACY:
-		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
-		break;
-	case SRIOV_OFFLOADS:
-		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
-{
-	switch (mode) {
-	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
-		*mlx5_mode = MLX5_INLINE_MODE_NONE;
-		break;
-	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
-		*mlx5_mode = MLX5_INLINE_MODE_L2;
-		break;
-	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
-		*mlx5_mode = MLX5_INLINE_MODE_IP;
-		break;
-	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
-		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
-{
-	switch (mlx5_mode) {
-	case MLX5_INLINE_MODE_NONE:
-		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
-		break;
-	case MLX5_INLINE_MODE_L2:
-		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
-		break;
-	case MLX5_INLINE_MODE_IP:
-		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
-		break;
-	case MLX5_INLINE_MODE_TCP_UDP:
-		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
-{
-	struct mlx5_core_dev *dev;
-	u16 cur_mlx5_mode, mlx5_mode = 0;
-
-	dev = devlink_priv(devlink);
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	cur_mlx5_mode = dev->priv.eswitch->mode;
-
-	if (cur_mlx5_mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	if (esw_mode_from_devlink(mode, &mlx5_mode))
-		return -EINVAL;
-
-	if (cur_mlx5_mode == mlx5_mode)
-		return 0;
-
-	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
-		return esw_offloads_start(dev->priv.eswitch);
-	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
-		return esw_offloads_stop(dev->priv.eswitch);
-	else
-		return -EINVAL;
-}
-
-int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
-{
-	struct mlx5_core_dev *dev;
-
-	dev = devlink_priv(devlink);
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (dev->priv.eswitch->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
-}
-
-int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
-{
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
-	struct mlx5_eswitch *esw = dev->priv.eswitch;
-	int num_vports = esw->enabled_vports;
-	int err;
-	int vport;
-	u8 mlx5_mode;
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
-	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
-	if (err)
-		goto out;
-
-	for (vport = 1; vport < num_vports; vport++) {
-		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
-		if (err) {
-			esw_warn(dev, "Failed to set min inline on vport %d\n",
-				 vport);
-			goto revert_inline_mode;
-		}
-	}
-
-	esw->offloads.inline_mode = mlx5_mode;
-	return 0;
-
-revert_inline_mode:
-	while (--vport > 0)
-		mlx5_modify_nic_vport_min_inline(dev,
-						 vport,
-						 esw->offloads.inline_mode);
-out:
-	return err;
-}
-
-int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
-{
-	struct mlx5_core_dev *dev = devlink_priv(devlink);
-	struct mlx5_eswitch *esw = dev->priv.eswitch;
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
-	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-}
-
-int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
-{
-	struct mlx5_core_dev *dev = esw->dev;
-	int vport;
-	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
-
-	if (esw->mode == SRIOV_NONE)
-		return -EOPNOTSUPP;
-
-	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
-	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
-		return -EOPNOTSUPP;
-
-	for (vport = 1; vport <= nvfs; vport++) {
-		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
-		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
-			return -EINVAL;
-		prev_mlx5_mode = mlx5_mode;
-	}
-
-	*mode = mlx5_mode;
-	return 0;
-}
-
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
-				     int vport_index,
-				     struct mlx5_eswitch_rep *__rep)
-{
-	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
-
-	rep = &offloads->vport_reps[vport_index];
-
-	memset(rep, 0, sizeof(*rep));
-
-	rep->load   = __rep->load;
-	rep->unload = __rep->unload;
-	rep->vport  = __rep->vport;
-	rep->netdev = __rep->netdev;
-	ether_addr_copy(rep->hw_id, __rep->hw_id);
-
-	INIT_LIST_HEAD(&rep->vport_sqs_list);
-	rep->valid = true;
-}
-
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index)
-{
-	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
-
-	rep = &offloads->vport_reps[vport_index];
-
-	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-		rep->unload(esw, rep);
-
-	rep->valid = false;
-}
-
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
-{
-#define UPLINK_REP_INDEX 0
-	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
-
-	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-	return rep->netdev;
-}
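
One consumer-side note on the file above: mlx5_eswitch_register_vport_rep()
copies the template it is given, so callers may pass a short-lived struct.
A hedged registration sketch, with my_rep_load()/my_rep_unload() as
placeholder callbacks standing in for the EN representor ones:

	/* Placeholder callbacks; the real EN driver opens the representor
	 * netdev here and installs its rx rule via
	 * mlx5_eswitch_create_vport_rx_rule().
	 */
	static int my_rep_load(struct mlx5_eswitch *esw,
			       struct mlx5_eswitch_rep *rep)
	{
		return 0;
	}

	static void my_rep_unload(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch_rep *rep)
	{
	}

	static void example_register_rep(struct mlx5_eswitch *esw, int vport,
					 struct net_device *rep_ndev,
					 const u8 *hw_id)
	{
		struct mlx5_eswitch_rep rep = {
			.load   = my_rep_load,
			.unload = my_rep_unload,
			.vport  = vport,
			.netdev = rep_ndev,
		};

		ether_addr_copy(rep.hw_id, hw_id);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
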
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
deleted file mode 100644
index c4478ec..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ /dev/null
@@ -1,514 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/device.h>
-#include <linux/mlx5/mlx5_ifc.h>
-
-#include "fs_core.h"
-#include "fs_cmd.h"
-#include "mlx5_core.h"
-#include "eswitch.h"
-
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
-			    struct mlx5_flow_table *ft)
-{
-	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
-
-	MLX5_SET(set_flow_table_root_in, in, opcode,
-		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
-	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
-	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
-	if (ft->vport) {
-		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
-		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
-	}
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
-			       u16 vport,
-			       enum fs_flow_table_op_mod op_mod,
-			       enum fs_flow_table_type type, unsigned int level,
-			       unsigned int log_size, struct mlx5_flow_table
-			       *next_ft, unsigned int *table_id, u32 flags)
-{
-	int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
-	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
-	int err;
-
-	MLX5_SET(create_flow_table_in, in, opcode,
-		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
-
-	MLX5_SET(create_flow_table_in, in, table_type, type);
-	MLX5_SET(create_flow_table_in, in, level, level);
-	MLX5_SET(create_flow_table_in, in, log_size, log_size);
-	if (vport) {
-		MLX5_SET(create_flow_table_in, in, vport_number, vport);
-		MLX5_SET(create_flow_table_in, in, other_vport, 1);
-	}
-
-	MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
-	MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
-
-	switch (op_mod) {
-	case FS_FT_OP_MOD_NORMAL:
-		if (next_ft) {
-			MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
-			MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
-		}
-		break;
-
-	case FS_FT_OP_MOD_LAG_DEMUX:
-		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
-		if (next_ft)
-			MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
-				 next_ft->id);
-		break;
-	}
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*table_id = MLX5_GET(create_flow_table_out, out,
-				     table_id);
-	return err;
-}
-
-int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
-				struct mlx5_flow_table *ft)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
-
-	MLX5_SET(destroy_flow_table_in, in, opcode,
-		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
-	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
-	if (ft->vport) {
-		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
-		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
-	}
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
-			       struct mlx5_flow_table *ft,
-			       struct mlx5_flow_table *next_ft)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
-
-	MLX5_SET(modify_flow_table_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
-	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
-	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
-
-	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
-		MLX5_SET(modify_flow_table_in, in, modify_field_select,
-			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
-		if (next_ft) {
-			MLX5_SET(modify_flow_table_in, in,
-				 lag_master_next_table_id, next_ft->id);
-		} else {
-			MLX5_SET(modify_flow_table_in, in,
-				 lag_master_next_table_id, 0);
-		}
-	} else {
-		if (ft->vport) {
-			MLX5_SET(modify_flow_table_in, in, vport_number,
-				 ft->vport);
-			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
-		}
-		MLX5_SET(modify_flow_table_in, in, modify_field_select,
-			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
-		if (next_ft) {
-			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
-			MLX5_SET(modify_flow_table_in, in, table_miss_id,
-				 next_ft->id);
-		} else {
-			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
-		}
-	}
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
-			       struct mlx5_flow_table *ft,
-			       u32 *in,
-			       unsigned int *group_id)
-{
-	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	int err;
-
-	MLX5_SET(create_flow_group_in, in, opcode,
-		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
-	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
-	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
-	if (ft->vport) {
-		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
-		MLX5_SET(create_flow_group_in, in, other_vport, 1);
-	}
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*group_id = MLX5_GET(create_flow_group_out, out,
-				     group_id);
-	return err;
-}
-
-int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
-				struct mlx5_flow_table *ft,
-				unsigned int group_id)
-{
-	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};
-
-	MLX5_SET(destroy_flow_group_in, in, opcode,
-		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
-	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
-	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
-	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
-	if (ft->vport) {
-		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
-		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
-	}
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
-			    int opmod, int modify_mask,
-			    struct mlx5_flow_table *ft,
-			    unsigned group_id,
-			    struct fs_fte *fte)
-{
-	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
-		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
-	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
-	struct mlx5_flow_rule *dst;
-	void *in_flow_context;
-	void *in_match_value;
-	void *in_dests;
-	u32 *in;
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_warn(dev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-
-	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-	MLX5_SET(set_fte_in, in, op_mod, opmod);
-	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
-	MLX5_SET(set_fte_in, in, table_type, ft->type);
-	MLX5_SET(set_fte_in, in, table_id,   ft->id);
-	MLX5_SET(set_fte_in, in, flow_index, fte->index);
-	if (ft->vport) {
-		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
-		MLX5_SET(set_fte_in, in, other_vport, 1);
-	}
-
-	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
-	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
-	MLX5_SET(flow_context, in_flow_context, action, fte->action);
-	MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
-	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
-				      match_value);
-	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
-
-	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
-	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-		int list_size = 0;
-
-		list_for_each_entry(dst, &fte->node.children, node.list) {
-			unsigned int id;
-
-			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-				continue;
-
-			MLX5_SET(dest_format_struct, in_dests, destination_type,
-				 dst->dest_attr.type);
-			if (dst->dest_attr.type ==
-			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
-				id = dst->dest_attr.ft->id;
-			} else {
-				id = dst->dest_attr.tir_num;
-			}
-			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
-			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
-			list_size++;
-		}
-
-		MLX5_SET(flow_context, in_flow_context, destination_list_size,
-			 list_size);
-	}
-
-	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-		int list_size = 0;
-
-		list_for_each_entry(dst, &fte->node.children, node.list) {
-			if (dst->dest_attr.type !=
-			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-				continue;
-
-			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
-				 dst->dest_attr.counter->id);
-			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
-			list_size++;
-		}
-
-		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
-			 list_size);
-	}
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	kvfree(in);
-	return err;
-}
-
-int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned group_id,
-			struct fs_fte *fte)
-{
-	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
-}
-
-int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned group_id,
-			int modify_mask,
-			struct fs_fte *fte)
-{
-	int opmod;
-	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
-						flow_table_properties_nic_receive.
-						flow_modify_en);
-	if (!atomic_mod_cap)
-		return -EOPNOTSUPP;
-	opmod = 1;
-
-	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
-}
-
-int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned int index)
-{
-	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};
-
-	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-	MLX5_SET(delete_fte_in, in, table_type, ft->type);
-	MLX5_SET(delete_fte_in, in, table_id, ft->id);
-	MLX5_SET(delete_fte_in, in, flow_index, index);
-	if (ft->vport) {
-		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
-		MLX5_SET(delete_fte_in, in, other_vport, 1);
-	}
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
-{
-	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
-	int err;
-
-	MLX5_SET(alloc_flow_counter_in, in, opcode,
-		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
-	return err;
-}
-
-int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
-{
-	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
-
-	MLX5_SET(dealloc_flow_counter_in, in, opcode,
-		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
-	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
-		      u64 *packets, u64 *bytes)
-{
-	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
-	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
-	void *stats;
-	int err = 0;
-
-	MLX5_SET(query_flow_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
-	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
-	*packets = MLX5_GET64(traffic_counter, stats, packets);
-	*bytes = MLX5_GET64(traffic_counter, stats, octets);
-	return 0;
-}
-
-struct mlx5_cmd_fc_bulk {
-	u16 id;
-	int num;
-	int outlen;
-	u32 out[];
-};
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
-{
-	struct mlx5_cmd_fc_bulk *b;
-	int outlen =
-		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
-		MLX5_ST_SZ_BYTES(traffic_counter) * num;
-
-	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
-	if (!b)
-		return NULL;
-
-	b->id = id;
-	b->num = num;
-	b->outlen = outlen;
-
-	return b;
-}
-
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
-{
-	kfree(b);
-}
-
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
-{
-	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
-
-	MLX5_SET(query_flow_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
-	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
-	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
-	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
-	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
-}
-
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u16 id,
-			  u64 *packets, u64 *bytes)
-{
-	int index = id - b->id;
-	void *stats;
-
-	if (index < 0 || index >= b->num) {
-		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
-			       id, b->id, b->id + b->num - 1);
-		return;
-	}
-
-	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
-			     flow_statistics[index]);
-	*packets = MLX5_GET64(traffic_counter, stats, packets);
-	*bytes = MLX5_GET64(traffic_counter, stats, octets);
-}
-
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
-		     int header_type,
-		     size_t size,
-		     void *encap_header,
-		     u32 *encap_id)
-{
-	int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
-	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)];
-	void *encap_header_in;
-	void *header;
-	int inlen;
-	int err;
-	u32 *in;
-
-	if (size > max_encap_size)
-		return -EINVAL;
-
-	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
-		     GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
-	header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
-	inlen = header - (void *)in  + size;
-
-	memset(in, 0, inlen);
-	MLX5_SET(alloc_encap_header_in, in, opcode,
-		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
-	MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
-	MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
-	memcpy(header, encap_header, size);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-
-	*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
-	kfree(in);
-	return err;
-}
-
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
-{
-	u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)];
-	u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)];
-
-	memset(in, 0, sizeof(in));
-	MLX5_SET(dealloc_encap_header_in, in, opcode,
-		 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
-	MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
-
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
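
On the last two helpers above: encap ids follow a plain alloc/attach/free
lifecycle. A hedged sketch, using MLX5_HEADER_TYPE_VXLAN from mlx5_ifc.h and
a zeroed buffer as a stand-in for the L2+IP+UDP+VXLAN header that the TC
offload path would actually build:

	static int example_encap_cycle(struct mlx5_core_dev *dev)
	{
		u8 buf[64] = {0};	/* stand-in for a real encap header */
		u32 encap_id;
		int err;

		err = mlx5_encap_alloc(dev, MLX5_HEADER_TYPE_VXLAN,
				       sizeof(buf), buf, &encap_id);
		if (err)
			return err;

		/* ... reference encap_id from rules via flow_act.encap_id ... */

		mlx5_encap_dealloc(dev, encap_id);
		return 0;
	}
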
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
deleted file mode 100644
index 8fad806..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MLX5_FS_CMD_
-#define _MLX5_FS_CMD_
-
-int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
-			       u16 vport,
-			       enum fs_flow_table_op_mod op_mod,
-			       enum fs_flow_table_type type, unsigned int level,
-			       unsigned int log_size, struct mlx5_flow_table
-			       *next_ft, unsigned int *table_id, u32 flags);
-
-int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
-				struct mlx5_flow_table *ft);
-
-int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
-			       struct mlx5_flow_table *ft,
-			       struct mlx5_flow_table *next_ft);
-
-int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
-			       struct mlx5_flow_table *ft,
-			       u32 *in, unsigned int *group_id);
-
-int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
-				struct mlx5_flow_table *ft,
-				unsigned int group_id);
-
-int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned group_id,
-			struct fs_fte *fte);
-
-int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned group_id,
-			int modify_mask,
-			struct fs_fte *fte);
-
-int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
-			struct mlx5_flow_table *ft,
-			unsigned int index);
-
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
-			    struct mlx5_flow_table *ft);
-
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
-int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
-int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
-		      u64 *packets, u64 *bytes);
-
-struct mlx5_cmd_fc_bulk;
-
-struct mlx5_cmd_fc_bulk *
-mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num);
-void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
-int
-mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
-void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
-			  struct mlx5_cmd_fc_bulk *b, u16 id,
-			  u64 *packets, u64 *bytes);
-
-#endif
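
The bulk counter API declared above amortizes one firmware round trip over a
contiguous counter-id range; individual values are then picked out of the
shared output buffer. A hedged usage sketch:

	static int example_read_counter_range(struct mlx5_core_dev *dev,
					      u16 first_id, int num)
	{
		struct mlx5_cmd_fc_bulk *b;
		u64 packets, bytes;
		int err;
		u16 id;

		b = mlx5_cmd_fc_bulk_alloc(dev, first_id, num);
		if (!b)
			return -ENOMEM;

		err = mlx5_cmd_fc_bulk_query(dev, b);
		if (err)
			goto out;

		for (id = first_id; id < first_id + num; id++)
			mlx5_cmd_fc_bulk_get(dev, b, id, &packets, &bytes);
	out:
		mlx5_cmd_fc_bulk_free(b);
		return err;
	}
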
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
deleted file mode 100644
index a263d89..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ /dev/null
@@ -1,2039 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mutex.h>
-#include <linux/mlx5/driver.h>
-
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "fs_cmd.h"
-
-#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
-					 sizeof(struct init_tree_node))
-
-#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
-		 ...) {.type = FS_TYPE_PRIO,\
-	.min_ft_level = min_level_val,\
-	.num_levels = num_levels_val,\
-	.num_leaf_prios = num_prios_val,\
-	.caps = caps_val,\
-	.children = (struct init_tree_node[]) {__VA_ARGS__},\
-	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
-}
-
-#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
-	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
-		 __VA_ARGS__)\
-
-#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
-	.children = (struct init_tree_node[]) {__VA_ARGS__},\
-	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
-}
-
-#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
-				   sizeof(long))
-
-#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
-
-#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
-			       .caps = (long[]) {__VA_ARGS__} }
-
-#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
-					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
-					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
-					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
-
-#define LEFTOVERS_NUM_LEVELS 1
-#define LEFTOVERS_NUM_PRIOS 1
-
-#define BY_PASS_PRIO_NUM_LEVELS 1
-#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
-			   LEFTOVERS_NUM_PRIOS)
-
-#define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 11
-#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Vlan, mac, ttc, aRFS */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 4
-#define KERNEL_NIC_NUM_PRIOS 1
-/* One more level for tc */
-#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
-
-#define ANCHOR_NUM_LEVELS 1
-#define ANCHOR_NUM_PRIOS 1
-#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
-
-#define OFFLOADS_MAX_FT 1
-#define OFFLOADS_NUM_PRIOS 1
-#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
-
-#define LAG_PRIO_NUM_LEVELS 1
-#define LAG_NUM_PRIOS 1
-#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
-
-struct node_caps {
-	size_t	arr_sz;
-	long	*caps;
-};
-static struct init_tree_node {
-	enum fs_node_type	type;
-	struct init_tree_node *children;
-	int ar_size;
-	struct node_caps caps;
-	int min_ft_level;
-	int num_leaf_prios;
-	int prio;
-	int num_levels;
-} root_fs = {
-	.type = FS_TYPE_NAMESPACE,
-	.ar_size = 7,
-	.children = (struct init_tree_node[]) {
-		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
-						  BY_PASS_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
-						  LAG_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
-		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
-						  ETHTOOL_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
-				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
-						  KERNEL_NIC_PRIO_NUM_LEVELS))),
-		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
-			 FS_CHAINING_CAPS,
-			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
-		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
-			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
-	}
-};
-
-enum fs_i_mutex_lock_class {
-	FS_MUTEX_GRANDPARENT,
-	FS_MUTEX_PARENT,
-	FS_MUTEX_CHILD
-};
-
-static void del_rule(struct fs_node *node);
-static void del_flow_table(struct fs_node *node);
-static void del_flow_group(struct fs_node *node);
-static void del_fte(struct fs_node *node);
-static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
-				struct mlx5_flow_destination *d2);
-static struct mlx5_flow_rule *
-find_flow_rule(struct fs_fte *fte,
-	       struct mlx5_flow_destination *dest);
-
-static void tree_init_node(struct fs_node *node,
-			   unsigned int refcount,
-			   void (*remove_func)(struct fs_node *))
-{
-	atomic_set(&node->refcount, refcount);
-	INIT_LIST_HEAD(&node->list);
-	INIT_LIST_HEAD(&node->children);
-	mutex_init(&node->lock);
-	node->remove_func = remove_func;
-}
-
-static void tree_add_node(struct fs_node *node, struct fs_node *parent)
-{
-	if (parent)
-		atomic_inc(&parent->refcount);
-	node->parent = parent;
-
-	/* A node with no parent is its own root */
-	if (!parent)
-		node->root = node;
-	else
-		node->root = parent->root;
-}
-
-static void tree_get_node(struct fs_node *node)
-{
-	atomic_inc(&node->refcount);
-}
-
-static void nested_lock_ref_node(struct fs_node *node,
-				 enum fs_i_mutex_lock_class class)
-{
-	if (node) {
-		mutex_lock_nested(&node->lock, class);
-		atomic_inc(&node->refcount);
-	}
-}
-
-static void lock_ref_node(struct fs_node *node)
-{
-	if (node) {
-		mutex_lock(&node->lock);
-		atomic_inc(&node->refcount);
-	}
-}
-
-static void unlock_ref_node(struct fs_node *node)
-{
-	if (node) {
-		atomic_dec(&node->refcount);
-		mutex_unlock(&node->lock);
-	}
-}
-
-static void tree_put_node(struct fs_node *node)
-{
-	struct fs_node *parent_node = node->parent;
-
-	lock_ref_node(parent_node);
-	if (atomic_dec_and_test(&node->refcount)) {
-		if (parent_node)
-			list_del_init(&node->list);
-		if (node->remove_func)
-			node->remove_func(node);
-		kfree(node);
-		node = NULL;
-	}
-	unlock_ref_node(parent_node);
-	if (!node && parent_node)
-		tree_put_node(parent_node);
-}
-
-static int tree_remove_node(struct fs_node *node)
-{
-	if (atomic_read(&node->refcount) > 1) {
-		atomic_dec(&node->refcount);
-		return -EEXIST;
-	}
-	tree_put_node(node);
-	return 0;
-}
-
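-/* Illustrative lifecycle sketch, with a hypothetical del_cb callback:
- *
- *   tree_init_node(&n->node, 1, del_cb);  // caller holds the initial ref
- *   tree_add_node(&n->node, &parent);     // also takes a ref on the parent
- *   tree_get_node(&n->node);              // temporary user takes a ref
- *   tree_put_node(&n->node);              // ...and drops it again
- *   tree_remove_node(&n->node);           // last ref: del_cb() runs, the
- *                                         // node is freed and the parent
- *                                         // ref is released; -EEXIST if
- *                                         // the node is still in use
- */
-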
-static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
-				 unsigned int prio)
-{
-	struct fs_prio *iter_prio;
-
-	fs_for_each_prio(iter_prio, ns) {
-		if (iter_prio->prio == prio)
-			return iter_prio;
-	}
-
-	return NULL;
-}
-
-static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
-{
-	unsigned int i;
-
-	for (i = 0; i < size; i++, mask++, val1++, val2++)
-		if ((*((u8 *)val1) & (*(u8 *)mask)) !=
-		    ((*(u8 *)val2) & (*(u8 *)mask)))
-			return false;
-
-	return true;
-}
-
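-/* Worked example: with a one-byte mask of 0x0f, values 0x1a and 0x2a
- * compare equal under the mask (0x1a & 0x0f == 0x2a & 0x0f == 0x0a),
- * while 0x1a and 0x1b do not; bits outside the mask never participate.
- */
-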
-static bool compare_match_value(struct mlx5_flow_group_mask *mask,
-				void *fte_param1, void *fte_param2)
-{
-	if (mask->match_criteria_enable &
-	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
-		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
-						fte_param1, outer_headers);
-		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
-						fte_param2, outer_headers);
-		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
-					      mask->match_criteria, outer_headers);
-
-		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
-				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
-			return false;
-	}
-
-	if (mask->match_criteria_enable &
-	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
-		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
-						fte_param1, misc_parameters);
-		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
-						fte_param2, misc_parameters);
-		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
-					  mask->match_criteria, misc_parameters);
-
-		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
-				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
-			return false;
-	}
-
-	if (mask->match_criteria_enable &
-	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
-		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
-						fte_param1, inner_headers);
-		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
-						fte_param2, inner_headers);
-		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
-					  mask->match_criteria, inner_headers);
-
-		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
-				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
-			return false;
-	}
-	return true;
-}
-
-static bool compare_match_criteria(u8 match_criteria_enable1,
-				   u8 match_criteria_enable2,
-				   void *mask1, void *mask2)
-{
-	return match_criteria_enable1 == match_criteria_enable2 &&
-		!memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
-}
-
-static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
-{
-	struct fs_node *root;
-	struct mlx5_flow_namespace *ns;
-
-	root = node->root;
-
-	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
-		pr_warn("mlx5: flow steering node is not in tree or is garbage\n");
-		return NULL;
-	}
-
-	ns = container_of(root, struct mlx5_flow_namespace, node);
-	return container_of(ns, struct mlx5_flow_root_namespace, ns);
-}
-
-static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
-{
-	struct mlx5_flow_root_namespace *root = find_root(node);
-
-	if (root)
-		return root->dev;
-	return NULL;
-}
-
-static void del_flow_table(struct fs_node *node)
-{
-	struct mlx5_flow_table *ft;
-	struct mlx5_core_dev *dev;
-	struct fs_prio *prio;
-	int err;
-
-	fs_get_obj(ft, node);
-	dev = get_dev(&ft->node);
-
-	err = mlx5_cmd_destroy_flow_table(dev, ft);
-	if (err)
-		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
-	fs_get_obj(prio, ft->node.parent);
-	prio->num_ft--;
-}
-
-static void del_rule(struct fs_node *node)
-{
-	struct mlx5_flow_rule *rule;
-	struct mlx5_flow_table *ft;
-	struct mlx5_flow_group *fg;
-	struct fs_fte *fte;
-	u32	*match_value;
-	int modify_mask;
-	struct mlx5_core_dev *dev = get_dev(node);
-	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
-	int err;
-	bool update_fte = false;
-
-	match_value = mlx5_vzalloc(match_len);
-	if (!match_value) {
-		mlx5_core_warn(dev, "failed to allocate inbox\n");
-		return;
-	}
-
-	fs_get_obj(rule, node);
-	fs_get_obj(fte, rule->node.parent);
-	fs_get_obj(fg, fte->node.parent);
-	memcpy(match_value, fte->val, sizeof(fte->val));
-	fs_get_obj(ft, fg->node.parent);
-	list_del(&rule->node.list);
-	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
-		mutex_lock(&rule->dest_attr.ft->lock);
-		list_del(&rule->next_ft);
-		mutex_unlock(&rule->dest_attr.ft->lock);
-	}
-
-	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER  &&
-	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
-		fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
-		update_fte = true;
-		goto out;
-	}
-
-	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
-	    --fte->dests_size) {
-		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-		update_fte = true;
-	}
-out:
-	if (update_fte && fte->dests_size) {
-		err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
-		if (err)
-			mlx5_core_warn(dev,
-				       "%s can't del rule fg id=%d fte_index=%d\n",
-				       __func__, fg->id, fte->index);
-	}
-	kvfree(match_value);
-}
-
-static void del_fte(struct fs_node *node)
-{
-	struct mlx5_flow_table *ft;
-	struct mlx5_flow_group *fg;
-	struct mlx5_core_dev *dev;
-	struct fs_fte *fte;
-	int err;
-
-	fs_get_obj(fte, node);
-	fs_get_obj(fg, fte->node.parent);
-	fs_get_obj(ft, fg->node.parent);
-
-	dev = get_dev(&ft->node);
-	err = mlx5_cmd_delete_fte(dev, ft,
-				  fte->index);
-	if (err)
-		mlx5_core_warn(dev,
-			       "flow steering can't delete fte in index %d of flow group id %d\n",
-			       fte->index, fg->id);
-
-	fte->status = 0;
-	fg->num_ftes--;
-}
-
-static void del_flow_group(struct fs_node *node)
-{
-	struct mlx5_flow_group *fg;
-	struct mlx5_flow_table *ft;
-	struct mlx5_core_dev *dev;
-
-	fs_get_obj(fg, node);
-	fs_get_obj(ft, fg->node.parent);
-	dev = get_dev(&ft->node);
-
-	if (ft->autogroup.active)
-		ft->autogroup.num_groups--;
-
-	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
-		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
-			       fg->id, ft->id);
-}
-
-static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
-				u32 *match_value,
-				unsigned int index)
-{
-	struct fs_fte *fte;
-
-	fte = kzalloc(sizeof(*fte), GFP_KERNEL);
-	if (!fte)
-		return ERR_PTR(-ENOMEM);
-
-	memcpy(fte->val, match_value, sizeof(fte->val));
-	fte->node.type =  FS_TYPE_FLOW_ENTRY;
-	fte->flow_tag = flow_act->flow_tag;
-	fte->index = index;
-	fte->action = flow_act->action;
-	fte->encap_id = flow_act->encap_id;
-
-	return fte;
-}
-
-static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
-{
-	struct mlx5_flow_group *fg;
-	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
-					    create_fg_in, match_criteria);
-	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
-					    create_fg_in,
-					    match_criteria_enable);
-	fg = kzalloc(sizeof(*fg), GFP_KERNEL);
-	if (!fg)
-		return ERR_PTR(-ENOMEM);
-
-	fg->mask.match_criteria_enable = match_criteria_enable;
-	memcpy(&fg->mask.match_criteria, match_criteria,
-	       sizeof(fg->mask.match_criteria));
-	fg->node.type =  FS_TYPE_FLOW_GROUP;
-	fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
-				   start_flow_index);
-	fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
-				end_flow_index) - fg->start_index + 1;
-	return fg;
-}
-
-static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
-						enum fs_flow_table_type table_type,
-						enum fs_flow_table_op_mod op_mod,
-						u32 flags)
-{
-	struct mlx5_flow_table *ft;
-
-	ft  = kzalloc(sizeof(*ft), GFP_KERNEL);
-	if (!ft)
-		return NULL;
-
-	ft->level = level;
-	ft->node.type = FS_TYPE_FLOW_TABLE;
-	ft->op_mod = op_mod;
-	ft->type = table_type;
-	ft->vport = vport;
-	ft->max_fte = max_fte;
-	ft->flags = flags;
-	INIT_LIST_HEAD(&ft->fwd_rules);
-	mutex_init(&ft->lock);
-
-	return ft;
-}
-
-/* If reverse is false, we search for the first flow table in the
- * root sub-tree from start (closest from the right); otherwise we
- * search for the last flow table in the root sub-tree up to start
- * (closest from the left).
- */
-static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
-							 struct list_head *start,
-							 bool reverse)
-{
-#define list_advance_entry(pos, reverse)		\
-	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
-
-#define list_for_each_advance_continue(pos, head, reverse)	\
-	for (pos = list_advance_entry(pos, reverse);		\
-	     &pos->list != (head);				\
-	     pos = list_advance_entry(pos, reverse))
-
-	struct fs_node *iter = list_entry(start, struct fs_node, list);
-	struct mlx5_flow_table *ft = NULL;
-
-	if (!root)
-		return NULL;
-
-	list_for_each_advance_continue(iter, &root->children, reverse) {
-		if (iter->type == FS_TYPE_FLOW_TABLE) {
-			fs_get_obj(ft, iter);
-			return ft;
-		}
-		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
-		if (ft)
-			return ft;
-	}
-
-	return ft;
-}
-
-/* If reverse is false, return the first flow table in the next priority
- * of prio in the tree; otherwise return the last flow table in the
- * previous priority of prio in the tree.
- */
-static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
-{
-	struct mlx5_flow_table *ft = NULL;
-	struct fs_node *curr_node;
-	struct fs_node *parent;
-
-	parent = prio->node.parent;
-	curr_node = &prio->node;
-	while (!ft && parent) {
-		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
-		curr_node = parent;
-		parent = curr_node->parent;
-	}
-	return ft;
-}
-
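-/* Example: for priorities p0 { ft_a } followed by p1 { ft_b } under the
- * same parent, find_closest_ft(p0, false) returns ft_b (first table in
- * a following priority) and find_closest_ft(p1, true) returns ft_a
- * (last table in a preceding priority); NULL means no such neighbour.
- */
-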
-/* Assuming all the tree is locked by mutex chain lock */
-static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
-{
-	return find_closest_ft(prio, false);
-}
-
-/* Assuming all the tree is locked by mutex chain lock */
-static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
-{
-	return find_closest_ft(prio, true);
-}
-
-static int connect_fts_in_prio(struct mlx5_core_dev *dev,
-			       struct fs_prio *prio,
-			       struct mlx5_flow_table *ft)
-{
-	struct mlx5_flow_table *iter;
-	int i = 0;
-	int err;
-
-	fs_for_each_ft(iter, prio) {
-		i++;
-		err = mlx5_cmd_modify_flow_table(dev,
-						 iter,
-						 ft);
-		if (err) {
-			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
-				       iter->id);
-			/* The driver is out of sync with the FW */
-			if (i > 1)
-				WARN_ON(true);
-			return err;
-		}
-	}
-	return 0;
-}
-
-/* Connect the flow tables of the previous priority of prio to ft */
-static int connect_prev_fts(struct mlx5_core_dev *dev,
-			    struct mlx5_flow_table *ft,
-			    struct fs_prio *prio)
-{
-	struct mlx5_flow_table *prev_ft;
-
-	prev_ft = find_prev_chained_ft(prio);
-	if (prev_ft) {
-		struct fs_prio *prev_prio;
-
-		fs_get_obj(prev_prio, prev_ft->node.parent);
-		return connect_fts_in_prio(dev, prev_prio, ft);
-	}
-	return 0;
-}
-
-static int update_root_ft_create(struct mlx5_flow_table *ft,
-				 struct fs_prio *prio)
-{
-	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
-	int min_level = INT_MAX;
-	int err;
-
-	if (root->root_ft)
-		min_level = root->root_ft->level;
-
-	if (ft->level >= min_level)
-		return 0;
-
-	err = mlx5_cmd_update_root_ft(root->dev, ft);
-	if (err)
-		mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
-			       ft->id);
-	else
-		root->root_ft = ft;
-
-	return err;
-}
-
-static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-					 struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_table *ft;
-	struct mlx5_flow_group *fg;
-	struct fs_fte *fte;
-	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-	int err = 0;
-
-	fs_get_obj(fte, rule->node.parent);
-	if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
-		return -EINVAL;
-	lock_ref_node(&fte->node);
-	fs_get_obj(fg, fte->node.parent);
-	fs_get_obj(ft, fg->node.parent);
-
-	memcpy(&rule->dest_attr, dest, sizeof(*dest));
-	err = mlx5_cmd_update_fte(get_dev(&ft->node),
-				  ft, fg->id,
-				  modify_mask,
-				  fte);
-	unlock_ref_node(&fte->node);
-
-	return err;
-}
-
-int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
-				 struct mlx5_flow_destination *new_dest,
-				 struct mlx5_flow_destination *old_dest)
-{
-	int i;
-
-	if (!old_dest) {
-		if (handle->num_rules != 1)
-			return -EINVAL;
-		return _mlx5_modify_rule_destination(handle->rule[0],
-						     new_dest);
-	}
-
-	for (i = 0; i < handle->num_rules; i++) {
-		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
-			return _mlx5_modify_rule_destination(handle->rule[i],
-							     new_dest);
-	}
-
-	return -EINVAL;
-}
-
-/* Modify/set FWD rules that point to old_next_ft to point to new_next_ft */
-static int connect_fwd_rules(struct mlx5_core_dev *dev,
-			     struct mlx5_flow_table *new_next_ft,
-			     struct mlx5_flow_table *old_next_ft)
-{
-	struct mlx5_flow_destination dest;
-	struct mlx5_flow_rule *iter;
-	int err = 0;
-
-	/* new_next_ft and old_next_ft could be NULL only
-	 * when we create/destroy the anchor flow table.
-	 */
-	if (!new_next_ft || !old_next_ft)
-		return 0;
-
-	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-	dest.ft = new_next_ft;
-
-	mutex_lock(&old_next_ft->lock);
-	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
-	mutex_unlock(&old_next_ft->lock);
-	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
-		err = _mlx5_modify_rule_destination(iter, &dest);
-		if (err)
-			pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
-			       new_next_ft->id);
-	}
-	return 0;
-}
-
-static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
-			      struct fs_prio *prio)
-{
-	struct mlx5_flow_table *next_ft;
-	int err = 0;
-
-	/* connect_prev_fts() and update_root_ft_create() are mutually exclusive */
-
-	if (list_empty(&prio->node.children)) {
-		err = connect_prev_fts(dev, ft, prio);
-		if (err)
-			return err;
-
-		next_ft = find_next_chained_ft(prio);
-		err = connect_fwd_rules(dev, ft, next_ft);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_FLOWTABLE(dev,
-			       flow_table_properties_nic_receive.modify_root))
-		err = update_root_ft_create(ft, prio);
-	return err;
-}
-
-static void list_add_flow_table(struct mlx5_flow_table *ft,
-				struct fs_prio *prio)
-{
-	struct list_head *prev = &prio->node.children;
-	struct mlx5_flow_table *iter;
-
-	fs_for_each_ft(iter, prio) {
-		if (iter->level > ft->level)
-			break;
-		prev = &iter->node.list;
-	}
-	list_add(&ft->node.list, prev);
-}
-
-static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-							enum fs_flow_table_op_mod op_mod,
-							u16 vport, int prio,
-							int max_fte, u32 level,
-							u32 flags)
-{
-	struct mlx5_flow_table *next_ft = NULL;
-	struct mlx5_flow_table *ft;
-	int err;
-	int log_table_sz;
-	struct mlx5_flow_root_namespace *root =
-		find_root(&ns->node);
-	struct fs_prio *fs_prio = NULL;
-
-	if (!root) {
-		pr_err("mlx5: flow steering failed to find root of namespace\n");
-		return ERR_PTR(-ENODEV);
-	}
-
-	mutex_lock(&root->chain_lock);
-	fs_prio = find_prio(ns, prio);
-	if (!fs_prio) {
-		err = -EINVAL;
-		goto unlock_root;
-	}
-	if (level >= fs_prio->num_levels) {
-		err = -ENOSPC;
-		goto unlock_root;
-	}
-	/* The requested level is relative to the priority range;
-	 * translate it to an absolute level.
-	 */
-	level += fs_prio->start_level;
-	ft = alloc_flow_table(level,
-			      vport,
-			      max_fte ? roundup_pow_of_two(max_fte) : 0,
-			      root->table_type,
-			      op_mod, flags);
-	if (!ft) {
-		err = -ENOMEM;
-		goto unlock_root;
-	}
-
-	tree_init_node(&ft->node, 1, del_flow_table);
-	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
-	next_ft = find_next_chained_ft(fs_prio);
-	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
-					 ft->level, log_table_sz, next_ft, &ft->id,
-					 ft->flags);
-	if (err)
-		goto free_ft;
-
-	err = connect_flow_table(root->dev, ft, fs_prio);
-	if (err)
-		goto destroy_ft;
-	lock_ref_node(&fs_prio->node);
-	tree_add_node(&ft->node, &fs_prio->node);
-	list_add_flow_table(ft, fs_prio);
-	fs_prio->num_ft++;
-	unlock_ref_node(&fs_prio->node);
-	mutex_unlock(&root->chain_lock);
-	return ft;
-destroy_ft:
-	mlx5_cmd_destroy_flow_table(root->dev, ft);
-free_ft:
-	kfree(ft);
-unlock_root:
-	mutex_unlock(&root->chain_lock);
-	return ERR_PTR(err);
-}
-
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-					       int prio, int max_fte,
-					       u32 level,
-					       u32 flags)
-{
-	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
-					max_fte, level, flags);
-}
-
-struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
-						     int prio, int max_fte,
-						     u32 level, u16 vport)
-{
-	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
-					max_fte, level, 0);
-}
-
-struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
-					       struct mlx5_flow_namespace *ns,
-					       int prio, u32 level)
-{
-	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
-					level, 0);
-}
-EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
-
-struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
-							    int prio,
-							    int num_flow_table_entries,
-							    int max_num_groups,
-							    u32 level,
-							    u32 flags)
-{
-	struct mlx5_flow_table *ft;
-
-	if (max_num_groups > num_flow_table_entries)
-		return ERR_PTR(-EINVAL);
-
-	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
-	if (IS_ERR(ft))
-		return ft;
-
-	ft->autogroup.active = true;
-	ft->autogroup.required_groups = max_num_groups;
-
-	return ft;
-}
-EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-
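-/* Hypothetical usage sketch (table size, group count and priority are
- * made-up values):
- *
- *   struct mlx5_flow_namespace *ns;
- *   struct mlx5_flow_table *ft;
- *
- *   ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_KERNEL);
- *   ft = mlx5_create_auto_grouped_flow_table(ns, 0, 1024, 16, 0, 0);
- *   if (IS_ERR(ft))
- *           return PTR_ERR(ft);
- */
-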
-/* Flow table should be locked */
-static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
-							u32 *fg_in,
-							struct list_head *prev_fg,
-							bool is_auto_fg)
-{
-	struct mlx5_flow_group *fg;
-	struct mlx5_core_dev *dev = get_dev(&ft->node);
-	int err;
-
-	if (!dev)
-		return ERR_PTR(-ENODEV);
-
-	fg = alloc_flow_group(fg_in);
-	if (IS_ERR(fg))
-		return fg;
-
-	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
-	if (err) {
-		kfree(fg);
-		return ERR_PTR(err);
-	}
-
-	if (ft->autogroup.active)
-		ft->autogroup.num_groups++;
-	/* Add node to tree */
-	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
-	tree_add_node(&fg->node, &ft->node);
-	/* Add node to group list */
-	list_add(&fg->node.list, prev_fg);
-
-	return fg;
-}
-
-struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
-					       u32 *fg_in)
-{
-	struct mlx5_flow_group *fg;
-
-	if (ft->autogroup.active)
-		return ERR_PTR(-EPERM);
-
-	lock_ref_node(&ft->node);
-	fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
-	unlock_ref_node(&ft->node);
-
-	return fg;
-}
-
-static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_rule *rule;
-
-	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-	if (!rule)
-		return NULL;
-
-	INIT_LIST_HEAD(&rule->next_ft);
-	rule->node.type = FS_TYPE_FLOW_DEST;
-	if (dest)
-		memcpy(&rule->dest_attr, dest, sizeof(*dest));
-
-	return rule;
-}
-
-static struct mlx5_flow_handle *alloc_handle(int num_rules)
-{
-	struct mlx5_flow_handle *handle;
-
-	handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
-			  num_rules, GFP_KERNEL);
-	if (!handle)
-		return NULL;
-
-	handle->num_rules = num_rules;
-
-	return handle;
-}
-
-static void destroy_flow_handle(struct fs_fte *fte,
-				struct mlx5_flow_handle *handle,
-				struct mlx5_flow_destination *dest,
-				int i)
-{
-	for (; --i >= 0;) {
-		if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
-			fte->dests_size--;
-			list_del(&handle->rule[i]->node.list);
-			kfree(handle->rule[i]);
-		}
-	}
-	kfree(handle);
-}
-
-static struct mlx5_flow_handle *
-create_flow_handle(struct fs_fte *fte,
-		   struct mlx5_flow_destination *dest,
-		   int dest_num,
-		   int *modify_mask,
-		   bool *new_rule)
-{
-	struct mlx5_flow_handle *handle;
-	struct mlx5_flow_rule *rule = NULL;
-	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
-	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
-	int type;
-	int i = 0;
-
-	handle = alloc_handle((dest_num) ? dest_num : 1);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
-
-	do {
-		if (dest) {
-			rule = find_flow_rule(fte, dest + i);
-			if (rule) {
-				atomic_inc(&rule->node.refcount);
-				goto rule_found;
-			}
-		}
-
-		*new_rule = true;
-		rule = alloc_rule(dest + i);
-		if (!rule)
-			goto free_rules;
-
-		/* Add dest to the dests list - flow tables must be at the
-		 * end of the list so that forward-to-next-prio rules come
-		 * last.
-		 */
-		tree_init_node(&rule->node, 1, del_rule);
-		if (dest &&
-		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
-			list_add(&rule->node.list, &fte->node.children);
-		else
-			list_add_tail(&rule->node.list, &fte->node.children);
-		if (dest) {
-			fte->dests_size++;
-
-			type = dest[i].type ==
-				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-			*modify_mask |= type ? count : dst;
-		}
-rule_found:
-		handle->rule[i] = rule;
-	} while (++i < dest_num);
-
-	return handle;
-
-free_rules:
-	destroy_flow_handle(fte, handle, dest, i);
-	return ERR_PTR(-ENOMEM);
-}
-
-/* fte should not be deleted while calling this function */
-static struct mlx5_flow_handle *
-add_rule_fte(struct fs_fte *fte,
-	     struct mlx5_flow_group *fg,
-	     struct mlx5_flow_destination *dest,
-	     int dest_num,
-	     bool update_action)
-{
-	struct mlx5_flow_handle *handle;
-	struct mlx5_flow_table *ft;
-	int modify_mask = 0;
-	int err;
-	bool new_rule = false;
-
-	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
-				    &new_rule);
-	if (IS_ERR(handle) || !new_rule)
-		goto out;
-
-	if (update_action)
-		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
-
-	fs_get_obj(ft, fg->node.parent);
-	if (!(fte->status & FS_FTE_STATUS_EXISTING))
-		err = mlx5_cmd_create_fte(get_dev(&ft->node),
-					  ft, fg->id, fte);
-	else
-		err = mlx5_cmd_update_fte(get_dev(&ft->node),
-					  ft, fg->id, modify_mask, fte);
-	if (err)
-		goto free_handle;
-
-	fte->status |= FS_FTE_STATUS_EXISTING;
-
-out:
-	return handle;
-
-free_handle:
-	destroy_flow_handle(fte, handle, dest, handle->num_rules);
-	return ERR_PTR(err);
-}
-
-/* Assumed fg is locked */
-static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
-				       struct list_head **prev)
-{
-	struct fs_fte *fte;
-	unsigned int start = fg->start_index;
-
-	if (prev)
-		*prev = &fg->node.children;
-
-	/* assumed list is sorted by index */
-	fs_for_each_fte(fte, fg) {
-		if (fte->index != start)
-			return start;
-		start++;
-		if (prev)
-			*prev = &fte->node.list;
-	}
-
-	return start;
-}
-
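-/* Example: for a group with start_index 8 whose sorted FTE list holds
- * indices {8, 9, 11}, the scan stops at the first gap and returns 10,
- * with *prev pointing at the list node of index 9 so the new FTE is
- * spliced in order.
- */
-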
-/* prev is output, prev->next = new_fte */
-static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
-				 u32 *match_value,
-				 struct mlx5_flow_act *flow_act,
-				 struct list_head **prev)
-{
-	struct fs_fte *fte;
-	int index;
-
-	index = get_free_fte_index(fg, prev);
-	fte = alloc_fte(flow_act, match_value, index);
-	if (IS_ERR(fte))
-		return fte;
-
-	return fte;
-}
-
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
-						u8 match_criteria_enable,
-						u32 *match_criteria)
-{
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct list_head *prev = ft->node.children.prev;
-	unsigned int candidate_index = 0;
-	struct mlx5_flow_group *fg;
-	void *match_criteria_addr;
-	unsigned int group_size = 0;
-	u32 *in;
-
-	if (!ft->autogroup.active)
-		return ERR_PTR(-ENOENT);
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return ERR_PTR(-ENOMEM);
-
-	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
-		/* We save place for flow groups in addition to max types */
-		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
-
-	/* max_fte may be smaller than required_groups + 1 */
-	if (group_size == 0)
-		group_size = 1;
-
-	/* sorted by start_index */
-	fs_for_each_fg(fg, ft) {
-		if (candidate_index + group_size > fg->start_index)
-			candidate_index = fg->start_index + fg->max_ftes;
-		else
-			break;
-		prev = &fg->node.list;
-	}
-
-	if (candidate_index + group_size > ft->max_fte) {
-		fg = ERR_PTR(-ENOSPC);
-		goto out;
-	}
-
-	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
-		 match_criteria_enable);
-	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
-	MLX5_SET(create_flow_group_in, in, end_flow_index,   candidate_index +
-		 group_size - 1);
-	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
-					   in, match_criteria);
-	memcpy(match_criteria_addr, match_criteria,
-	       MLX5_ST_SZ_BYTES(fte_match_param));
-
-	fg = create_flow_group_common(ft, in, prev, true);
-out:
-	kvfree(in);
-	return fg;
-}
-
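-/* Sizing example: with max_fte = 16 and required_groups = 3, each
- * autogroup is carved with group_size = 16 / (3 + 1) = 4 entries,
- * leaving a spare quarter of the table for further groups; group_size
- * is clamped to 1 when max_fte is smaller than required_groups + 1.
- */
-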
-static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
-				struct mlx5_flow_destination *d2)
-{
-	if (d1->type == d2->type) {
-		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
-		     d1->vport_num == d2->vport_num) ||
-		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
-		     d1->ft == d2->ft) ||
-		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
-		     d1->tir_num == d2->tir_num))
-			return true;
-	}
-
-	return false;
-}
-
-static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
-					     struct mlx5_flow_destination *dest)
-{
-	struct mlx5_flow_rule *rule;
-
-	list_for_each_entry(rule, &fte->node.children, node.list) {
-		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
-			return rule;
-	}
-	return NULL;
-}
-
-static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
-					    u32 *match_value,
-					    struct mlx5_flow_act *flow_act,
-					    struct mlx5_flow_destination *dest,
-					    int dest_num)
-{
-	struct mlx5_flow_handle *handle;
-	struct mlx5_flow_table *ft;
-	struct list_head *prev;
-	struct fs_fte *fte;
-	int i;
-
-	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
-	fs_for_each_fte(fte, fg) {
-		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
-		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
-		    (flow_act->action & fte->action) &&
-		    flow_act->flow_tag == fte->flow_tag) {
-			int old_action = fte->action;
-
-			fte->action |= flow_act->action;
-			handle = add_rule_fte(fte, fg, dest, dest_num,
-					      old_action != flow_act->action);
-			if (IS_ERR(handle)) {
-				fte->action = old_action;
-				goto unlock_fte;
-			} else {
-				goto add_rules;
-			}
-		}
-		unlock_ref_node(&fte->node);
-	}
-	fs_get_obj(ft, fg->node.parent);
-	if (fg->num_ftes >= fg->max_ftes) {
-		handle = ERR_PTR(-ENOSPC);
-		goto unlock_fg;
-	}
-
-	fte = create_fte(fg, match_value, flow_act, &prev);
-	if (IS_ERR(fte)) {
-		handle = ERR_CAST(fte);
-		goto unlock_fg;
-	}
-	tree_init_node(&fte->node, 0, del_fte);
-	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
-	handle = add_rule_fte(fte, fg, dest, dest_num, false);
-	if (IS_ERR(handle)) {
-		kfree(fte);
-		goto unlock_fg;
-	}
-
-	fg->num_ftes++;
-
-	tree_add_node(&fte->node, &fg->node);
-	list_add(&fte->node.list, prev);
-add_rules:
-	for (i = 0; i < handle->num_rules; i++) {
-		if (atomic_read(&handle->rule[i]->node.refcount) == 1)
-			tree_add_node(&handle->rule[i]->node, &fte->node);
-	}
-unlock_fte:
-	unlock_ref_node(&fte->node);
-unlock_fg:
-	unlock_ref_node(&fg->node);
-	return handle;
-}
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
-{
-	struct mlx5_flow_rule *dst;
-	struct fs_fte *fte;
-
-	fs_get_obj(fte, handle->rule[0]->node.parent);
-
-	fs_for_each_dst(dst, fte) {
-		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-			return dst->dest_attr.counter;
-	}
-
-	return NULL;
-}
-
-static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
-{
-	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
-		return !counter;
-
-	if (!counter)
-		return false;
-
-	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
-			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
-}
-
-static bool dest_is_valid(struct mlx5_flow_destination *dest,
-			  u32 action,
-			  struct mlx5_flow_table *ft)
-{
-	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
-		return counter_is_valid(dest->counter, action);
-
-	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
-		return true;
-
-	if (!dest || ((dest->type ==
-	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
-	    (dest->ft->level <= ft->level)))
-		return false;
-	return true;
-}
-
-static struct mlx5_flow_handle *
-_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
-		     struct mlx5_flow_spec *spec,
-		     struct mlx5_flow_act *flow_act,
-		     struct mlx5_flow_destination *dest,
-		     int dest_num)
-{
-	struct mlx5_flow_group *g;
-	struct mlx5_flow_handle *rule;
-	int i;
-
-	for (i = 0; i < dest_num; i++) {
-		if (!dest_is_valid(&dest[i], flow_act->action, ft))
-			return ERR_PTR(-EINVAL);
-	}
-
-	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
-	fs_for_each_fg(g, ft)
-		if (compare_match_criteria(g->mask.match_criteria_enable,
-					   spec->match_criteria_enable,
-					   g->mask.match_criteria,
-					   spec->match_criteria)) {
-			rule = add_rule_fg(g, spec->match_value,
-					   flow_act, dest, dest_num);
-			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
-				goto unlock;
-		}
-
-	g = create_autogroup(ft, spec->match_criteria_enable,
-			     spec->match_criteria);
-	if (IS_ERR(g)) {
-		rule = ERR_CAST(g);
-		goto unlock;
-	}
-
-	rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
-	if (IS_ERR(rule)) {
-		/* Remove assumes refcount > 0 and autogroup creates a group
-		 * with a refcount = 0.
-		 */
-		unlock_ref_node(&ft->node);
-		tree_get_node(&g->node);
-		tree_remove_node(&g->node);
-		return rule;
-	}
-unlock:
-	unlock_ref_node(&ft->node);
-	return rule;
-}
-
-static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
-{
-	return ((ft->type == FS_FT_NIC_RX) &&
-		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
-}
-
-struct mlx5_flow_handle *
-mlx5_add_flow_rules(struct mlx5_flow_table *ft,
-		    struct mlx5_flow_spec *spec,
-		    struct mlx5_flow_act *flow_act,
-		    struct mlx5_flow_destination *dest,
-		    int dest_num)
-{
-	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
-	struct mlx5_flow_destination gen_dest;
-	struct mlx5_flow_table *next_ft = NULL;
-	struct mlx5_flow_handle *handle = NULL;
-	u32 sw_action = flow_act->action;
-	struct fs_prio *prio;
-
-	fs_get_obj(prio, ft->node.parent);
-	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
-		if (!fwd_next_prio_supported(ft))
-			return ERR_PTR(-EOPNOTSUPP);
-		if (dest)
-			return ERR_PTR(-EINVAL);
-		mutex_lock(&root->chain_lock);
-		next_ft = find_next_chained_ft(prio);
-		if (next_ft) {
-			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-			gen_dest.ft = next_ft;
-			dest = &gen_dest;
-			dest_num = 1;
-			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-		} else {
-			mutex_unlock(&root->chain_lock);
-			return ERR_PTR(-EOPNOTSUPP);
-		}
-	}
-
-	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
-
-	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
-		if (!IS_ERR_OR_NULL(handle) &&
-		    (list_empty(&handle->rule[0]->next_ft))) {
-			mutex_lock(&next_ft->lock);
-			list_add(&handle->rule[0]->next_ft,
-				 &next_ft->fwd_rules);
-			mutex_unlock(&next_ft->lock);
-			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
-		}
-		mutex_unlock(&root->chain_lock);
-	}
-	return handle;
-}
-EXPORT_SYMBOL(mlx5_add_flow_rules);
-
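-/* Minimal caller sketch (hypothetical tirn value; error handling
- * trimmed):
- *
- *   struct mlx5_flow_act flow_act = {
- *           .action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- *           .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- *   };
- *   struct mlx5_flow_destination dest = {
- *           .type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
- *           .tir_num = tirn,
- *   };
- *   struct mlx5_flow_spec *spec = mlx5_vzalloc(sizeof(*spec));
- *   struct mlx5_flow_handle *handle;
- *
- *   handle = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
- *   kvfree(spec);
- *   ...
- *   mlx5_del_flow_rules(handle);
- */
-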
-void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
-{
-	int i;
-
-	for (i = handle->num_rules - 1; i >= 0; i--)
-		tree_remove_node(&handle->rule[i]->node);
-	kfree(handle);
-}
-EXPORT_SYMBOL(mlx5_del_flow_rules);
-
-/* Assuming prio->node.children (flow tables) is sorted by level */
-static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
-{
-	struct fs_prio *prio;
-
-	fs_get_obj(prio, ft->node.parent);
-
-	if (!list_is_last(&ft->node.list, &prio->node.children))
-		return list_next_entry(ft, node.list);
-	return find_next_chained_ft(prio);
-}
-
-static int update_root_ft_destroy(struct mlx5_flow_table *ft)
-{
-	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
-	struct mlx5_flow_table *new_root_ft = NULL;
-
-	if (root->root_ft != ft)
-		return 0;
-
-	new_root_ft = find_next_ft(ft);
-	if (new_root_ft) {
-		int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft);
-
-		if (err) {
-			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
-				       ft->id);
-			return err;
-		}
-	}
-	root->root_ft = new_root_ft;
-	return 0;
-}
-
-/* Disconnect ft from the chain: connect the flow tables of the
- * previous priority directly to the next flow table.
- */
-static int disconnect_flow_table(struct mlx5_flow_table *ft)
-{
-	struct mlx5_core_dev *dev = get_dev(&ft->node);
-	struct mlx5_flow_table *next_ft;
-	struct fs_prio *prio;
-	int err = 0;
-
-	err = update_root_ft_destroy(ft);
-	if (err)
-		return err;
-
-	fs_get_obj(prio, ft->node.parent);
-	if (list_first_entry(&prio->node.children,
-			     struct mlx5_flow_table,
-			     node.list) != ft)
-		return 0;
-
-	next_ft = find_next_chained_ft(prio);
-	err = connect_fwd_rules(dev, next_ft, ft);
-	if (err)
-		return err;
-
-	err = connect_prev_fts(dev, next_ft, prio);
-	if (err)
-		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
-			       ft->id);
-	return err;
-}
-
-int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
-{
-	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
-	int err = 0;
-
-	mutex_lock(&root->chain_lock);
-	err = disconnect_flow_table(ft);
-	if (err) {
-		mutex_unlock(&root->chain_lock);
-		return err;
-	}
-	if (tree_remove_node(&ft->node))
-		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
-			       ft->id);
-	mutex_unlock(&root->chain_lock);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_table);
-
-void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
-{
-	if (tree_remove_node(&fg->node))
-		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
-			       fg->id);
-}
-
-struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
-						    enum mlx5_flow_namespace_type type)
-{
-	struct mlx5_flow_steering *steering = dev->priv.steering;
-	struct mlx5_flow_root_namespace *root_ns;
-	int prio;
-	struct fs_prio *fs_prio;
-	struct mlx5_flow_namespace *ns;
-
-	if (!steering)
-		return NULL;
-
-	switch (type) {
-	case MLX5_FLOW_NAMESPACE_BYPASS:
-	case MLX5_FLOW_NAMESPACE_LAG:
-	case MLX5_FLOW_NAMESPACE_OFFLOADS:
-	case MLX5_FLOW_NAMESPACE_ETHTOOL:
-	case MLX5_FLOW_NAMESPACE_KERNEL:
-	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
-	case MLX5_FLOW_NAMESPACE_ANCHOR:
-		prio = type;
-		break;
-	case MLX5_FLOW_NAMESPACE_FDB:
-		if (steering->fdb_root_ns)
-			return &steering->fdb_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-		if (steering->esw_egress_root_ns)
-			return &steering->esw_egress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-		if (steering->esw_ingress_root_ns)
-			return &steering->esw_ingress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
-		if (steering->sniffer_rx_root_ns)
-			return &steering->sniffer_rx_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
-		if (steering->sniffer_tx_root_ns)
-			return &steering->sniffer_tx_root_ns->ns;
-		else
-			return NULL;
-	default:
-		return NULL;
-	}
-
-	root_ns = steering->root_ns;
-	if (!root_ns)
-		return NULL;
-
-	fs_prio = find_prio(&root_ns->ns, prio);
-	if (!fs_prio)
-		return NULL;
-
-	ns = list_first_entry(&fs_prio->node.children,
-			      typeof(*ns),
-			      node.list);
-
-	return ns;
-}
-EXPORT_SYMBOL(mlx5_get_flow_namespace);
-
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
-				      unsigned int prio, int num_levels)
-{
-	struct fs_prio *fs_prio;
-
-	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
-	if (!fs_prio)
-		return ERR_PTR(-ENOMEM);
-
-	fs_prio->node.type = FS_TYPE_PRIO;
-	tree_init_node(&fs_prio->node, 1, NULL);
-	tree_add_node(&fs_prio->node, &ns->node);
-	fs_prio->num_levels = num_levels;
-	fs_prio->prio = prio;
-	list_add_tail(&fs_prio->node.list, &ns->node.children);
-
-	return fs_prio;
-}
-
-static struct mlx5_flow_namespace *
-fs_init_namespace(struct mlx5_flow_namespace *ns)
-{
-	ns->node.type = FS_TYPE_NAMESPACE;
-
-	return ns;
-}
-
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
-{
-	struct mlx5_flow_namespace	*ns;
-
-	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
-	if (!ns)
-		return ERR_PTR(-ENOMEM);
-
-	fs_init_namespace(ns);
-	tree_init_node(&ns->node, 1, NULL);
-	tree_add_node(&ns->node, &prio->node);
-	list_add_tail(&ns->node.list, &prio->node.children);
-
-	return ns;
-}
-
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
-			     struct init_tree_node *prio_metadata)
-{
-	struct fs_prio *fs_prio;
-	int i;
-
-	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
-		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
-		if (IS_ERR(fs_prio))
-			return PTR_ERR(fs_prio);
-	}
-	return 0;
-}
-
-#define FLOW_TABLE_BIT_SZ 1
-#define GET_FLOW_TABLE_CAP(dev, offset) \
-	((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) +	\
-			offset / 32)) >>					\
-	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
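-/* Worked example: FS_CAP() above produces a bit offset into the flow
- * table capability block, so for offset = 33 the macro reads 32-bit
- * word 33 / 32 = 1 and shifts right by 32 - 1 - (33 & 0x1f) = 30,
- * isolating bit 1 (MSB-first) of the second big-endian word in bit 0.
- */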
-static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
-{
-	int i;
-
-	for (i = 0; i < caps->arr_sz; i++) {
-		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
-			return false;
-	}
-	return true;
-}
-
-static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
-				    struct init_tree_node *init_node,
-				    struct fs_node *fs_parent_node,
-				    struct init_tree_node *init_parent_node,
-				    int prio)
-{
-	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
-					      flow_table_properties_nic_receive.
-					      max_ft_level);
-	struct mlx5_flow_namespace *fs_ns;
-	struct fs_prio *fs_prio;
-	struct fs_node *base;
-	int i;
-	int err;
-
-	if (init_node->type == FS_TYPE_PRIO) {
-		if ((init_node->min_ft_level > max_ft_level) ||
-		    !has_required_caps(steering->dev, &init_node->caps))
-			return 0;
-
-		fs_get_obj(fs_ns, fs_parent_node);
-		if (init_node->num_leaf_prios)
-			return create_leaf_prios(fs_ns, prio, init_node);
-		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
-		if (IS_ERR(fs_prio))
-			return PTR_ERR(fs_prio);
-		base = &fs_prio->node;
-	} else if (init_node->type == FS_TYPE_NAMESPACE) {
-		fs_get_obj(fs_prio, fs_parent_node);
-		fs_ns = fs_create_namespace(fs_prio);
-		if (IS_ERR(fs_ns))
-			return PTR_ERR(fs_ns);
-		base = &fs_ns->node;
-	} else {
-		return -EINVAL;
-	}
-	prio = 0;
-	for (i = 0; i < init_node->ar_size; i++) {
-		err = init_root_tree_recursive(steering, &init_node->children[i],
-					       base, init_node, prio);
-		if (err)
-			return err;
-		if (init_node->children[i].type == FS_TYPE_PRIO &&
-		    init_node->children[i].num_leaf_prios) {
-			prio += init_node->children[i].num_leaf_prios;
-		}
-	}
-
-	return 0;
-}
-
-static int init_root_tree(struct mlx5_flow_steering *steering,
-			  struct init_tree_node *init_node,
-			  struct fs_node *fs_parent_node)
-{
-	int i;
-	struct mlx5_flow_namespace *fs_ns;
-	int err;
-
-	fs_get_obj(fs_ns, fs_parent_node);
-	for (i = 0; i < init_node->ar_size; i++) {
-		err = init_root_tree_recursive(steering, &init_node->children[i],
-					       &fs_ns->node,
-					       init_node, i);
-		if (err)
-			return err;
-	}
-	return 0;
-}
-
-static struct mlx5_flow_root_namespace *
-create_root_ns(struct mlx5_flow_steering *steering,
-	       enum fs_flow_table_type table_type)
-{
-	struct mlx5_flow_root_namespace *root_ns;
-	struct mlx5_flow_namespace *ns;
-
-	/* Create the root namespace */
-	root_ns = mlx5_vzalloc(sizeof(*root_ns));
-	if (!root_ns)
-		return NULL;
-
-	root_ns->dev = steering->dev;
-	root_ns->table_type = table_type;
-
-	ns = &root_ns->ns;
-	fs_init_namespace(ns);
-	mutex_init(&root_ns->chain_lock);
-	tree_init_node(&ns->node, 1, NULL);
-	tree_add_node(&ns->node, NULL);
-
-	return root_ns;
-}
-
-static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
-
-static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
-{
-	struct fs_prio *prio;
-
-	fs_for_each_prio(prio, ns) {
-		 /* This updates prio start_level and num_levels */
-		set_prio_attrs_in_prio(prio, acc_level);
-		acc_level += prio->num_levels;
-	}
-	return acc_level;
-}
-
-static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
-{
-	struct mlx5_flow_namespace *ns;
-	int acc_level_ns = acc_level;
-
-	prio->start_level = acc_level;
-	fs_for_each_ns(ns, prio)
-		/* This updates start_level and num_levels of ns's priority descendants */
-		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
-	if (!prio->num_levels)
-		prio->num_levels = acc_level_ns - prio->start_level;
-	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
-}
-
-static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
-{
-	struct mlx5_flow_namespace *ns = &root_ns->ns;
-	struct fs_prio *prio;
-	int start_level = 0;
-
-	fs_for_each_prio(prio, ns) {
-		set_prio_attrs_in_prio(prio, start_level);
-		start_level += prio->num_levels;
-	}
-}
-
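-/* Example: two priorities with num_levels 2 and 3 end up with
- * start_level 0 and 2 respectively, so the namespace spans levels 0..4
- * and __mlx5_create_flow_table() turns a level relative to a priority
- * into an absolute one by adding prio->start_level.
- */
-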
-#define ANCHOR_PRIO 0
-#define ANCHOR_SIZE 1
-#define ANCHOR_LEVEL 0
-static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
-{
-	struct mlx5_flow_namespace *ns = NULL;
-	struct mlx5_flow_table *ft;
-
-	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
-	if (!ns)
-		return -EINVAL;
-	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
-	if (IS_ERR(ft)) {
-		mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
-		return PTR_ERR(ft);
-	}
-	return 0;
-}
-
-static int init_root_ns(struct mlx5_flow_steering *steering)
-{
-	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
-	if (!steering->root_ns)
-		goto cleanup;
-
-	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
-		goto cleanup;
-
-	set_prio_attrs(steering->root_ns);
-
-	if (create_anchor_flow_table(steering))
-		goto cleanup;
-
-	return 0;
-
-cleanup:
-	mlx5_cleanup_fs(steering->dev);
-	return -ENOMEM;
-}
-
-static void clean_tree(struct fs_node *node)
-{
-	if (node) {
-		struct fs_node *iter;
-		struct fs_node *temp;
-
-		list_for_each_entry_safe(iter, temp, &node->children, list)
-			clean_tree(iter);
-		tree_remove_node(node);
-	}
-}
-
-static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
-{
-	if (!root_ns)
-		return;
-
-	clean_tree(&root_ns->ns.node);
-}
-
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_flow_steering *steering = dev->priv.steering;
-
-	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return;
-
-	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->esw_egress_root_ns);
-	cleanup_root_ns(steering->esw_ingress_root_ns);
-	cleanup_root_ns(steering->fdb_root_ns);
-	cleanup_root_ns(steering->sniffer_rx_root_ns);
-	cleanup_root_ns(steering->sniffer_tx_root_ns);
-	mlx5_cleanup_fc_stats(dev);
-	kfree(steering);
-}
-
-static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
-{
-	struct fs_prio *prio;
-
-	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
-	if (!steering->sniffer_tx_root_ns)
-		return -ENOMEM;
-
-	/* Create single prio */
-	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
-	if (IS_ERR(prio)) {
-		cleanup_root_ns(steering->sniffer_tx_root_ns);
-		return PTR_ERR(prio);
-	}
-	return 0;
-}
-
-static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
-{
-	struct fs_prio *prio;
-
-	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
-	if (!steering->sniffer_rx_root_ns)
-		return -ENOMEM;
-
-	/* Create single prio */
-	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
-	if (IS_ERR(prio)) {
-		cleanup_root_ns(steering->sniffer_rx_root_ns);
-		return PTR_ERR(prio);
-	}
-	return 0;
-}
-
-static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
-{
-	struct fs_prio *prio;
-
-	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
-	if (!steering->fdb_root_ns)
-		return -ENOMEM;
-
-	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
-	if (IS_ERR(prio))
-		goto out_err;
-
-	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
-	if (IS_ERR(prio))
-		goto out_err;
-
-	set_prio_attrs(steering->fdb_root_ns);
-	return 0;
-
-out_err:
-	cleanup_root_ns(steering->fdb_root_ns);
-	steering->fdb_root_ns = NULL;
-	return PTR_ERR(prio);
-}
-
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
-{
-	struct fs_prio *prio;
-
-	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-	if (!steering->esw_egress_root_ns)
-		return -ENOMEM;
-
-	/* Create a single prio */
-	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
-	return PTR_ERR_OR_ZERO(prio);
-}
-
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
-{
-	struct fs_prio *prio;
-
-	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-	if (!steering->esw_ingress_root_ns)
-		return -ENOMEM;
-
-	/* Create a single prio */
-	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
-	return PTR_ERR_OR_ZERO(prio);
-}
-
-int mlx5_init_fs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_flow_steering *steering;
-	int err = 0;
-
-	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-		return 0;
-
-	err = mlx5_init_fc_stats(dev);
-	if (err)
-		return err;
-
-	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
-	if (!steering)
-		return -ENOMEM;
-	steering->dev = dev;
-	dev->priv.steering = steering;
-
-	if (MLX5_CAP_GEN(dev, nic_flow_table) &&
-	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
-		err = init_root_ns(steering);
-		if (err)
-			goto err;
-	}
-
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
-			err = init_fdb_root_ns(steering);
-			if (err)
-				goto err;
-		}
-		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-			err = init_egress_acl_root_ns(steering);
-			if (err)
-				goto err;
-		}
-		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-			err = init_ingress_acl_root_ns(steering);
-			if (err)
-				goto err;
-		}
-	}
-
-	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
-		err = init_sniffer_rx_root_ns(steering);
-		if (err)
-			goto err;
-	}
-
-	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
-		err = init_sniffer_tx_root_ns(steering);
-		if (err)
-			goto err;
-	}
-
-	return 0;
-err:
-	mlx5_cleanup_fs(dev);
-	return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
deleted file mode 100644
index 8e668c6..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MLX5_FS_CORE_
-#define _MLX5_FS_CORE_
-
-#include <linux/mlx5/fs.h>
-
-enum fs_node_type {
-	FS_TYPE_NAMESPACE,
-	FS_TYPE_PRIO,
-	FS_TYPE_FLOW_TABLE,
-	FS_TYPE_FLOW_GROUP,
-	FS_TYPE_FLOW_ENTRY,
-	FS_TYPE_FLOW_DEST
-};
-
-enum fs_flow_table_type {
-	FS_FT_NIC_RX          = 0x0,
-	FS_FT_ESW_EGRESS_ACL  = 0x2,
-	FS_FT_ESW_INGRESS_ACL = 0x3,
-	FS_FT_FDB             = 0x4,
-	FS_FT_SNIFFER_RX      = 0x5,
-	FS_FT_SNIFFER_TX      = 0x6,
-};
-
-enum fs_flow_table_op_mod {
-	FS_FT_OP_MOD_NORMAL,
-	FS_FT_OP_MOD_LAG_DEMUX,
-};
-
-enum fs_fte_status {
-	FS_FTE_STATUS_EXISTING = 1UL << 0,
-};
-
-struct mlx5_flow_steering {
-	struct mlx5_core_dev *dev;
-	struct mlx5_flow_root_namespace *root_ns;
-	struct mlx5_flow_root_namespace *fdb_root_ns;
-	struct mlx5_flow_root_namespace *esw_egress_root_ns;
-	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
-	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
-	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
-};
-
-struct fs_node {
-	struct list_head	list;
-	struct list_head	children;
-	enum fs_node_type	type;
-	struct fs_node		*parent;
-	struct fs_node		*root;
-	/* lock the node for writing and traversing */
-	struct mutex		lock;
-	atomic_t		refcount;
-	void			(*remove_func)(struct fs_node *);
-};
-
-struct mlx5_flow_rule {
-	struct fs_node				node;
-	struct mlx5_flow_destination		dest_attr;
-	/* next_ft should be accessed under chain_lock and only if the
-	 * sw_action is FWD_NEXT_PRIO.
-	 */
-	struct list_head			next_ft;
-	u32					sw_action;
-};
-
-struct mlx5_flow_handle {
-	int num_rules;
-	struct mlx5_flow_rule *rule[];
-};
-
-/* Type of children is mlx5_flow_group */
-struct mlx5_flow_table {
-	struct fs_node			node;
-	u32				id;
-	u16				vport;
-	unsigned int			max_fte;
-	unsigned int			level;
-	enum fs_flow_table_type		type;
-	enum fs_flow_table_op_mod	op_mod;
-	struct {
-		bool			active;
-		unsigned int		required_groups;
-		unsigned int		num_groups;
-	} autogroup;
-	/* Protect fwd_rules */
-	struct mutex			lock;
-	/* FWD rules that point on this flow table */
-	struct list_head		fwd_rules;
-	u32				flags;
-};
-
-struct mlx5_fc_cache {
-	u64 packets;
-	u64 bytes;
-	u64 lastuse;
-};
-
-struct mlx5_fc {
-	struct rb_node node;
-	struct list_head list;
-
-	/* last{packets,bytes} members are used when calculating the delta since
-	 * the last reading
-	 */
-	u64 lastpackets;
-	u64 lastbytes;
-
-	u16 id;
-	bool deleted;
-	bool aging;
-
-	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
-};
-
-/* Type of children is mlx5_flow_rule */
-struct fs_fte {
-	struct fs_node			node;
-	u32				val[MLX5_ST_SZ_DW(fte_match_param)];
-	u32				dests_size;
-	u32				flow_tag;
-	u32				index;
-	u32				action;
-	u32				encap_id;
-	enum fs_fte_status		status;
-	struct mlx5_fc			*counter;
-};
-
-/* Type of children is mlx5_flow_table/namespace */
-struct fs_prio {
-	struct fs_node			node;
-	unsigned int			num_levels;
-	unsigned int			start_level;
-	unsigned int			prio;
-	unsigned int			num_ft;
-};
-
-/* Type of children is fs_prio */
-struct mlx5_flow_namespace {
-	/* parent == NULL => root ns */
-	struct	fs_node			node;
-};
-
-struct mlx5_flow_group_mask {
-	u8	match_criteria_enable;
-	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-/* Type of children is fs_fte */
-struct mlx5_flow_group {
-	struct fs_node			node;
-	struct mlx5_flow_group_mask	mask;
-	u32				start_index;
-	u32				max_ftes;
-	u32				num_ftes;
-	u32				id;
-};
-
-struct mlx5_flow_root_namespace {
-	struct mlx5_flow_namespace	ns;
-	enum   fs_flow_table_type	table_type;
-	struct mlx5_core_dev		*dev;
-	struct mlx5_flow_table		*root_ft;
-	/* Should be held when chaining flow tables */
-	struct mutex			chain_lock;
-};
-
-int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
-
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
-
-#define fs_get_obj(v, _node)  { v = container_of((_node), typeof(*v), node); }
-
-#define fs_list_for_each_entry(pos, root)		\
-	list_for_each_entry(pos, root, node.list)
-
-#define fs_list_for_each_entry_safe(pos, tmp, root)		\
-	list_for_each_entry_safe(pos, tmp, root, node.list)
-
-#define fs_for_each_ns_or_ft_reverse(pos, prio)				\
-	list_for_each_entry_reverse(pos, &(prio)->node.children, list)
-
-#define fs_for_each_ns_or_ft(pos, prio)					\
-	list_for_each_entry(pos, (&(prio)->node.children), list)
-
-#define fs_for_each_prio(pos, ns)			\
-	fs_list_for_each_entry(pos, &(ns)->node.children)
-
-#define fs_for_each_ns(pos, prio)			\
-	fs_list_for_each_entry(pos, &(prio)->node.children)
-
-#define fs_for_each_ft(pos, prio)			\
-	fs_list_for_each_entry(pos, &(prio)->node.children)
-
-#define fs_for_each_ft_safe(pos, tmp, prio)			\
-	fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
-
-#define fs_for_each_fg(pos, ft)			\
-	fs_list_for_each_entry(pos, &(ft)->node.children)
-
-#define fs_for_each_fte(pos, fg)			\
-	fs_list_for_each_entry(pos, &(fg)->node.children)
-
-#define fs_for_each_dst(pos, fte)			\
-	fs_list_for_each_entry(pos, &(fte)->node.children)
-
-#endif
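
For readers unfamiliar with the container_of() idiom behind fs_get_obj():
every flow steering object embeds a struct fs_node, and the macro recovers
the enclosing object from a node pointer, which is what lets the generic
fs_for_each_*() walkers above iterate a single children list whether the
children are priorities, tables, groups or FTEs. A minimal userspace sketch
of the same idiom (the flow_table layout here is illustrative, not the
driver's):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Same shape as the driver's macro; relies on GNU C typeof(). */
#define fs_get_obj(v, _node)  { v = container_of((_node), typeof(*v), node); }

struct fs_node { int type; };

struct flow_table {		/* illustrative, not the driver's layout */
	unsigned int id;
	struct fs_node node;	/* embedded node, as in mlx5_flow_table */
};

int main(void)
{
	struct flow_table ft = { .id = 7 };
	struct fs_node *n = &ft.node;	/* what a traversal macro yields */
	struct flow_table *back;

	fs_get_obj(back, n);		/* recover the enclosing object */
	printf("id=%u\n", back->id);	/* prints id=7 */
	return 0;
}
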
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
deleted file mode 100644
index 7431f63..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/fs.h>
-#include <linux/rbtree.h>
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "fs_cmd.h"
-
-#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
-
-/* locking scheme:
- *
- * It is the responsibility of the user to prevent concurrent or badly
- * ordered calls to mlx5_fc_create() and mlx5_fc_destroy(), and to avoid
- * accessing a reference to a struct mlx5_fc after it has been destroyed.
- * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
- * call a dump (access to struct mlx5_fc) after a counter is destroyed.
- *
- * access to counter list:
- * - create (user context)
- *   - mlx5_fc_create() only adds to an addlist to be consumed by
- *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
- *   - queue the work to do the actual insertion
- *
- * - destroy (user context)
- *   - mark a counter as deleted
- *   - queue the work to do the actual deletion
- *
- * - dump (user context)
- *   user should not call dump after destroy
- *
- * - query (single threaded workqueue context)
- *   destroy/dump - no conflict (see destroy)
- *   query/dump - packets and bytes might be inconsistent (since the update
- *                is not atomic)
- *   query/create - no conflict (see create)
- *   since every create/destroy queues the work, the worker only queries
- *   the hardware once the polling period has actually elapsed.
- */
-
-static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
-{
-	struct rb_node **new = &root->rb_node;
-	struct rb_node *parent = NULL;
-
-	while (*new) {
-		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
-		int result = counter->id - this->id;
-
-		parent = *new;
-		if (result < 0)
-			new = &((*new)->rb_left);
-		else
-			new = &((*new)->rb_right);
-	}
-
-	/* Add new node and rebalance tree. */
-	rb_link_node(&counter->node, parent, new);
-	rb_insert_color(&counter->node, root);
-}
-
-static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
-					   struct mlx5_fc *first,
-					   u16 last_id)
-{
-	struct mlx5_cmd_fc_bulk *b;
-	struct rb_node *node = NULL;
-	u16 afirst_id;
-	int num;
-	int err;
-	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
-
-	/* first id must be aligned to 4 when using bulk query */
-	afirst_id = first->id & ~0x3;
-
-	/* number of counters to query, including the last counter */
-	num = ALIGN(last_id - afirst_id + 1, 4);
-	if (num > max_bulk) {
-		num = max_bulk;
-		last_id = afirst_id + num - 1;
-	}
-
-	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
-	if (!b) {
-		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
-		return NULL;
-	}
-
-	err = mlx5_cmd_fc_bulk_query(dev, b);
-	if (err) {
-		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
-		goto out;
-	}
-
-	for (node = &first->node; node; node = rb_next(node)) {
-		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
-		struct mlx5_fc_cache *c = &counter->cache;
-		u64 packets;
-		u64 bytes;
-
-		if (counter->id > last_id)
-			break;
-
-		mlx5_cmd_fc_bulk_get(dev, b,
-				     counter->id, &packets, &bytes);
-
-		if (c->packets == packets)
-			continue;
-
-		c->packets = packets;
-		c->bytes = bytes;
-		c->lastuse = jiffies;
-	}
-
-out:
-	mlx5_cmd_fc_bulk_free(b);
-
-	return node;
-}
-
-static void mlx5_fc_stats_work(struct work_struct *work)
-{
-	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
-						 priv.fc_stats.work.work);
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-	unsigned long now = jiffies;
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_fc *last = NULL;
-	struct rb_node *node;
-	LIST_HEAD(tmplist);
-
-	spin_lock(&fc_stats->addlist_lock);
-
-	list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
-		queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
-
-	spin_unlock(&fc_stats->addlist_lock);
-
-	list_for_each_entry(counter, &tmplist, list)
-		mlx5_fc_stats_insert(&fc_stats->counters, counter);
-
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = rb_next(node);
-
-		if (counter->deleted) {
-			rb_erase(&counter->node, &fc_stats->counters);
-
-			mlx5_cmd_fc_free(dev, counter->id);
-
-			kfree(counter);
-			continue;
-		}
-
-		last = counter;
-	}
-
-	if (time_before(now, fc_stats->next_query) || !last)
-		return;
-
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = mlx5_fc_stats_query(dev, counter, last->id);
-	}
-
-	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
-}
-
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
-{
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-	struct mlx5_fc *counter;
-	int err;
-
-	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
-	if (!counter)
-		return ERR_PTR(-ENOMEM);
-
-	err = mlx5_cmd_fc_alloc(dev, &counter->id);
-	if (err)
-		goto err_out;
-
-	if (aging) {
-		counter->cache.lastuse = jiffies;
-		counter->aging = true;
-
-		spin_lock(&fc_stats->addlist_lock);
-		list_add(&counter->list, &fc_stats->addlist);
-		spin_unlock(&fc_stats->addlist_lock);
-
-		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
-	}
-
-	return counter;
-
-err_out:
-	kfree(counter);
-
-	return ERR_PTR(err);
-}
-
-void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
-{
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
-	if (!counter)
-		return;
-
-	if (counter->aging) {
-		counter->deleted = true;
-		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
-		return;
-	}
-
-	mlx5_cmd_fc_free(dev, counter->id);
-	kfree(counter);
-}
-
-int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
-{
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-
-	fc_stats->counters = RB_ROOT;
-	INIT_LIST_HEAD(&fc_stats->addlist);
-	spin_lock_init(&fc_stats->addlist_lock);
-
-	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
-	if (!fc_stats->wq)
-		return -ENOMEM;
-
-	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
-
-	return 0;
-}
-
-void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
-{
-	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-	struct mlx5_fc *counter;
-	struct mlx5_fc *tmp;
-	struct rb_node *node;
-
-	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
-	destroy_workqueue(dev->priv.fc_stats.wq);
-	dev->priv.fc_stats.wq = NULL;
-
-	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-		list_del(&counter->list);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-
-		kfree(counter);
-	}
-
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
-
-		node = rb_next(node);
-
-		rb_erase(&counter->node, &fc_stats->counters);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-
-		kfree(counter);
-	}
-}
-
-void mlx5_fc_query_cached(struct mlx5_fc *counter,
-			  u64 *bytes, u64 *packets, u64 *lastuse)
-{
-	struct mlx5_fc_cache c;
-
-	c = counter->cache;
-
-	*bytes = c.bytes - counter->lastbytes;
-	*packets = c.packets - counter->lastpackets;
-	*lastuse = c.lastuse;
-
-	counter->lastbytes = c.bytes;
-	counter->lastpackets = c.packets;
-}
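
A note on the bulk-query window in mlx5_fc_stats_query() above: the first
counter id is rounded down to a multiple of 4, the count is rounded up to a
multiple of 4 (including the last id), and the window is clamped to the
device's maximum bulk size. That arithmetic in isolation, as a runnable
sketch (ALIGN is reimplemented here and max_bulk is a made-up value):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* as in the kernel */

int main(void)
{
	unsigned int first_id = 0x1d, last_id = 0x2a;
	unsigned int max_bulk = 16;		/* illustrative device limit */
	unsigned int afirst_id, num;

	afirst_id = first_id & ~0x3u;		/* round start down to x4 */
	num = ALIGN(last_id - afirst_id + 1, 4); /* round count up to x4 */
	if (num > max_bulk) {			/* clamp to the device limit */
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}
	printf("query [%#x..%#x], %u counters\n",
	       afirst_id, afirst_id + num - 1, num);	/* [0x1c..0x2b], 16 */
	return 0;
}
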
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
deleted file mode 100644
index 5718aad..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include <linux/module.h>
-#include "mlx5_core.h"
-
-static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
-				  int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
-
-	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_query_board_id(struct mlx5_core_dev *dev)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
-	int err;
-
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_cmd_query_adapter(dev, out, outlen);
-	if (err)
-		goto out;
-
-	memcpy(dev->board_id,
-	       MLX5_ADDR_OF(query_adapter_out, out,
-			    query_adapter_struct.vsd_contd_psid),
-	       MLX5_FLD_SZ_BYTES(query_adapter_out,
-				 query_adapter_struct.vsd_contd_psid));
-
-out:
-	kfree(out);
-	return err;
-}
-
-int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
-	int err;
-
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_cmd_query_adapter(mdev, out, outlen);
-	if (err)
-		goto out;
-
-	*vendor_id = MLX5_GET(query_adapter_out, out,
-			      query_adapter_struct.ieee_vendor_id);
-out:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_vendor_id);
-
-int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
-{
-	int err;
-
-	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
-	if (err)
-		return err;
-
-	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, pg)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, atomic)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, roce)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-	    MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, vector_calc)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
-		if (err)
-			return err;
-	}
-
-	if (MLX5_CAP_GEN(dev, qos)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
-{
-	u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(init_hca_in)]   = {0};
-
-	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
-{
-	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)]   = {0};
-
-	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
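
mlx5_query_hca_caps() above is a chain of identical guard-then-query steps,
one per optional capability group. A hypothetical table-driven rendering of
that shape (cap_supported() and get_caps() are stand-ins for MLX5_CAP_GEN()
and mlx5_core_get_caps(); this is a sketch, not the driver's code):

#include <stdio.h>

enum cap_type { CAP_ETH, CAP_ODP, CAP_ATOMIC, CAP_ROCE };  /* illustrative */

/* Stand-ins for the capability bit check and the query command. */
static int cap_supported(enum cap_type t) { return t != CAP_ODP; }
static int get_caps(enum cap_type t) { printf("query %d\n", t); return 0; }

int main(void)
{
	static const enum cap_type optional[] = {
		CAP_ETH, CAP_ODP, CAP_ATOMIC, CAP_ROCE,
	};
	unsigned int i;
	int err;

	for (i = 0; i < sizeof(optional) / sizeof(optional[0]); i++) {
		if (!cap_supported(optional[i]))
			continue;	/* skip groups the device lacks */
		err = get_caps(optional[i]);
		if (err)
			return err;	/* first failure aborts, as above */
	}
	return 0;
}

The driver keeps the open-coded chain, presumably because the guards are not
uniform: the eswitch flow table group, for instance, also checks
vport_group_manager, so a single predicate per table entry would not fit.
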
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
deleted file mode 100644
index 5bcf934..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/vmalloc.h>
-#include <linux/hardirq.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-enum {
-	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
-	MAX_MISSES			= 3,
-};
-
-enum {
-	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
-	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
-	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
-	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
-	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
-	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
-	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
-	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
-	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
-	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
-	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
-};
-
-enum {
-	MLX5_NIC_IFC_FULL		= 0,
-	MLX5_NIC_IFC_DISABLED		= 1,
-	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
-	MLX5_NIC_IFC_INVALID		= 3
-};
-
-enum {
-	MLX5_DROP_NEW_HEALTH_WORK,
-};
-
-static u8 get_nic_state(struct mlx5_core_dev *dev)
-{
-	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
-}
-
-static void trigger_cmd_completions(struct mlx5_core_dev *dev)
-{
-	unsigned long flags;
-	u64 vector;
-
-	/* wait for pending handlers to complete */
-	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
-	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
-	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
-	if (!vector)
-		goto no_trig;
-
-	vector |= MLX5_TRIGGERED_CMD_COMP;
-	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-
-	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
-	mlx5_cmd_comp_handler(dev, vector);
-	return;
-
-no_trig:
-	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
-}
-
-static int in_fatal(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-	struct health_buffer __iomem *h = health->health;
-
-	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
-		return 1;
-
-	if (ioread32be(&h->fw_ver) == 0xffffffff)
-		return 1;
-
-	return 0;
-}
-
-void mlx5_enter_error_state(struct mlx5_core_dev *dev)
-{
-	mutex_lock(&dev->intf_state_mutex);
-	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-		goto unlock;
-
-	mlx5_core_err(dev, "start\n");
-	if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
-		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-		trigger_cmd_completions(dev);
-	}
-
-	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
-	mlx5_core_err(dev, "end\n");
-
-unlock:
-	mutex_unlock(&dev->intf_state_mutex);
-}
-
-static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
-{
-	u8 nic_interface = get_nic_state(dev);
-
-	switch (nic_interface) {
-	case MLX5_NIC_IFC_FULL:
-		mlx5_core_warn(dev, "Expected to see disabled NIC but it is in full driver mode\n");
-		break;
-
-	case MLX5_NIC_IFC_DISABLED:
-		mlx5_core_warn(dev, "starting teardown\n");
-		break;
-
-	case MLX5_NIC_IFC_NO_DRAM_NIC:
-		mlx5_core_warn(dev, "Expected to see disabled NIC but it is in no-DRAM-NIC mode\n");
-		break;
-	default:
-		mlx5_core_warn(dev, "Expected to see disabled NIC but it has invalid value %d\n",
-			       nic_interface);
-	}
-
-	mlx5_disable_device(dev);
-}
-
-static void health_recover(struct work_struct *work)
-{
-	struct mlx5_core_health *health;
-	struct delayed_work *dwork;
-	struct mlx5_core_dev *dev;
-	struct mlx5_priv *priv;
-	u8 nic_state;
-
-	dwork = container_of(work, struct delayed_work, work);
-	health = container_of(dwork, struct mlx5_core_health, recover_work);
-	priv = container_of(health, struct mlx5_priv, health);
-	dev = container_of(priv, struct mlx5_core_dev, priv);
-
-	nic_state = get_nic_state(dev);
-	if (nic_state == MLX5_NIC_IFC_INVALID) {
-		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
-		return;
-	}
-
-	dev_err(&dev->pdev->dev, "starting health recovery flow\n");
-	mlx5_recover_device(dev);
-}
-
-/* How long to wait before the health recovery flow resets the driver (in msecs) */
-#define MLX5_RECOVERY_DELAY_MSECS 60000
-static void health_care(struct work_struct *work)
-{
-	unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
-	struct mlx5_core_health *health;
-	struct mlx5_core_dev *dev;
-	struct mlx5_priv *priv;
-
-	health = container_of(work, struct mlx5_core_health, work);
-	priv = container_of(health, struct mlx5_priv, health);
-	dev = container_of(priv, struct mlx5_core_dev, priv);
-	mlx5_core_warn(dev, "handling bad device here\n");
-	mlx5_handle_bad_state(dev);
-
-	spin_lock(&health->wq_lock);
-	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
-		schedule_delayed_work(&health->recover_work, recover_delay);
-	else
-		dev_err(&dev->pdev->dev,
-			"new health work is not permitted at this stage\n");
-	spin_unlock(&health->wq_lock);
-}
-
-static const char *hsynd_str(u8 synd)
-{
-	switch (synd) {
-	case MLX5_HEALTH_SYNDR_FW_ERR:
-		return "firmware internal error";
-	case MLX5_HEALTH_SYNDR_IRISC_ERR:
-		return "irisc not responding";
-	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
-		return "unrecoverable hardware error";
-	case MLX5_HEALTH_SYNDR_CRC_ERR:
-		return "firmware CRC error";
-	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
-		return "ICM fetch PCI error";
-	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
-		return "HW fatal error";
-	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
-		return "async EQ buffer overrun";
-	case MLX5_HEALTH_SYNDR_EQ_ERR:
-		return "EQ error";
-	case MLX5_HEALTH_SYNDR_EQ_INV:
-		return "Invalid EQ referenced";
-	case MLX5_HEALTH_SYNDR_FFSER_ERR:
-		return "FFSER error";
-	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
-		return "High temperature";
-	default:
-		return "unrecognized error";
-	}
-}
-
-static u16 get_maj(u32 fw)
-{
-	return fw >> 28;
-}
-
-static u16 get_min(u32 fw)
-{
-	return fw >> 16 & 0xfff;
-}
-
-static u16 get_sub(u32 fw)
-{
-	return fw & 0xffff;
-}
-
-static void print_health_info(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-	struct health_buffer __iomem *h = health->health;
-	char fw_str[18];
-	u32 fw;
-	int i;
-
-	/* If the syndrome is 0, the device is OK and there is no need to print the buffer */
-	if (!ioread8(&h->synd))
-		return;
-
-	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
-		dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
-
-	dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
-	dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
-	fw = ioread32be(&h->fw_ver);
-	sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
-	dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
-	dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
-	dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
-	dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
-	dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
-}
-
-static unsigned long get_next_poll_jiffies(void)
-{
-	unsigned long next;
-
-	get_random_bytes(&next, sizeof(next));
-	next %= HZ;
-	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
-
-	return next;
-}
-
-static void poll_health(unsigned long data)
-{
-	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
-	struct mlx5_core_health *health = &dev->priv.health;
-	u32 count;
-
-	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-		mod_timer(&health->timer, get_next_poll_jiffies());
-		return;
-	}
-
-	count = ioread32be(health->health_counter);
-	if (count == health->prev)
-		++health->miss_counter;
-	else
-		health->miss_counter = 0;
-
-	health->prev = count;
-	if (health->miss_counter == MAX_MISSES) {
-		dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
-		print_health_info(dev);
-	} else {
-		mod_timer(&health->timer, get_next_poll_jiffies());
-	}
-
-	if (in_fatal(dev) && !health->sick) {
-		health->sick = true;
-		print_health_info(dev);
-		spin_lock(&health->wq_lock);
-		if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
-			queue_work(health->wq, &health->work);
-		else
-			dev_err(&dev->pdev->dev,
-				"new health work is not permitted at this stage\n");
-		spin_unlock(&health->wq_lock);
-	}
-}
-
-void mlx5_start_health_poll(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-
-	init_timer(&health->timer);
-	health->sick = 0;
-	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-	health->health = &dev->iseg->health;
-	health->health_counter = &dev->iseg->health_counter;
-
-	health->timer.data = (unsigned long)dev;
-	health->timer.function = poll_health;
-	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
-	add_timer(&health->timer);
-}
-
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-
-	del_timer_sync(&health->timer);
-}
-
-void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-
-	spin_lock(&health->wq_lock);
-	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
-	spin_unlock(&health->wq_lock);
-	cancel_delayed_work_sync(&health->recover_work);
-	cancel_work_sync(&health->work);
-}
-
-void mlx5_health_cleanup(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health = &dev->priv.health;
-
-	destroy_workqueue(health->wq);
-}
-
-int mlx5_health_init(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_health *health;
-	char *name;
-
-	health = &dev->priv.health;
-	name = kmalloc(64, GFP_KERNEL);
-	if (!name)
-		return -ENOMEM;
-
-	strcpy(name, "mlx5_health");
-	strcat(name, dev_name(&dev->pdev->dev));
-	health->wq = create_singlethread_workqueue(name);
-	kfree(name);
-	if (!health->wq)
-		return -ENOMEM;
-	spin_lock_init(&health->wq_lock);
-	INIT_WORK(&health->work, health_care);
-	INIT_DELAYED_WORK(&health->recover_work, health_recover);
-
-	return 0;
-}
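
The firmware version word printed by print_health_info() above packs three
fields into 32 bits: 4 bits of major, 12 bits of minor, 16 bits of subminor.
A runnable check of that unpacking (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t get_maj(uint32_t fw) { return fw >> 28; }		/* 4 bits */
static uint16_t get_min(uint32_t fw) { return fw >> 16 & 0xfff; }	/* 12 bits */
static uint16_t get_sub(uint32_t fw) { return fw & 0xffff; }		/* 16 bits */

int main(void)
{
	uint32_t fw = (4u << 28) | (15u << 16) | 1000u;	/* made-up 4.15.1000 */

	printf("fw_ver %d.%d.%d\n", get_maj(fw), get_min(fw), get_sub(fw));
	return 0;	/* prints fw_ver 4.15.1000 */
}
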
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
deleted file mode 100644
index 5595724..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/netdevice.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/vport.h>
-#include "mlx5_core.h"
-
-enum {
-	MLX5_LAG_FLAG_BONDED = 1 << 0,
-};
-
-struct lag_func {
-	struct mlx5_core_dev *dev;
-	struct net_device    *netdev;
-};
-
-/* Used for collection of netdev event info. */
-struct lag_tracker {
-	enum   netdev_lag_tx_type           tx_type;
-	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
-	bool is_bonded;
-};
-
-/* LAG data of a ConnectX card.
- * It serves both of its physical functions (PFs).
- */
-struct mlx5_lag {
-	u8                        flags;
-	u8                        v2p_map[MLX5_MAX_PORTS];
-	struct lag_func           pf[MLX5_MAX_PORTS];
-	struct lag_tracker        tracker;
-	struct delayed_work       bond_work;
-	struct notifier_block     nb;
-};
-
-/* General purpose, use for short periods of time.
- * Beware of lock dependencies (preferably, no locks should be acquired
- * under it).
- */
-static DEFINE_MUTEX(lag_mutex);
-
-static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
-			       u8 remap_port2)
-{
-	u32   in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
-	u32   out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
-	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
-
-	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
-
-	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
-	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
-			       u8 remap_port2)
-{
-	u32   in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
-	u32   out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
-	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
-
-	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
-	MLX5_SET(modify_lag_in, in, field_select, 0x1);
-
-	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
-	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
-{
-	u32  in[MLX5_ST_SZ_DW(destroy_lag_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
-
-	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
-{
-	u32  in[MLX5_ST_SZ_DW(create_vport_lag_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
-
-	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
-
-int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
-{
-	u32  in[MLX5_ST_SZ_DW(destroy_vport_lag_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
-
-	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
-
-static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
-{
-	return dev->priv.lag;
-}
-
-static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
-				       struct net_device *ndev)
-{
-	int i;
-
-	for (i = 0; i < MLX5_MAX_PORTS; i++)
-		if (ldev->pf[i].netdev == ndev)
-			return i;
-
-	return -1;
-}
-
-static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
-{
-	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
-}
-
-static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
-					   u8 *port1, u8 *port2)
-{
-	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		if (tracker->netdev_state[0].tx_enabled) {
-			*port1 = 1;
-			*port2 = 1;
-		} else {
-			*port1 = 2;
-			*port2 = 2;
-		}
-	} else {
-		*port1 = 1;
-		*port2 = 2;
-		if (!tracker->netdev_state[0].link_up)
-			*port1 = 2;
-		else if (!tracker->netdev_state[1].link_up)
-			*port2 = 1;
-	}
-}
-
-static void mlx5_activate_lag(struct mlx5_lag *ldev,
-			      struct lag_tracker *tracker)
-{
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
-	int err;
-
-	ldev->flags |= MLX5_LAG_FLAG_BONDED;
-
-	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
-				       &ldev->v2p_map[1]);
-
-	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
-	if (err)
-		mlx5_core_err(dev0,
-			      "Failed to create LAG (%d)\n",
-			      err);
-}
-
-static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
-{
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
-	int err;
-
-	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
-
-	err = mlx5_cmd_destroy_lag(dev0);
-	if (err)
-		mlx5_core_err(dev0,
-			      "Failed to destroy LAG (%d)\n",
-			      err);
-}
-
-static void mlx5_do_bond(struct mlx5_lag *ldev)
-{
-	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
-	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
-	struct lag_tracker tracker;
-	u8 v2p_port1, v2p_port2;
-	int i, err;
-
-	if (!dev0 || !dev1)
-		return;
-
-	mutex_lock(&lag_mutex);
-	tracker = ldev->tracker;
-	mutex_unlock(&lag_mutex);
-
-	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
-		if (mlx5_sriov_is_enabled(dev0) ||
-		    mlx5_sriov_is_enabled(dev1)) {
-			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
-			return;
-		}
-
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
-						    MLX5_INTERFACE_PROTOCOL_IB);
-
-		mlx5_activate_lag(ldev, &tracker);
-
-		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_enable_roce(dev1);
-	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
-		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
-					       &v2p_port2);
-
-		if ((v2p_port1 != ldev->v2p_map[0]) ||
-		    (v2p_port2 != ldev->v2p_map[1])) {
-			ldev->v2p_map[0] = v2p_port1;
-			ldev->v2p_map[1] = v2p_port2;
-
-			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
-			if (err)
-				mlx5_core_err(dev0,
-					      "Failed to modify LAG (%d)\n",
-					      err);
-		}
-	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
-		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
-		mlx5_nic_vport_disable_roce(dev1);
-
-		mlx5_deactivate_lag(ldev);
-
-		for (i = 0; i < MLX5_MAX_PORTS; i++)
-			if (ldev->pf[i].dev)
-				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
-							 MLX5_INTERFACE_PROTOCOL_IB);
-	}
-}
-
-static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
-{
-	schedule_delayed_work(&ldev->bond_work, delay);
-}
-
-static void mlx5_do_bond_work(struct work_struct *work)
-{
-	struct delayed_work *delayed_work = to_delayed_work(work);
-	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
-					     bond_work);
-	int status;
-
-	status = mlx5_dev_list_trylock();
-	if (!status) {
-		/* 1 sec delay. */
-		mlx5_queue_bond_work(ldev, HZ);
-		return;
-	}
-
-	mlx5_do_bond(ldev);
-	mlx5_dev_list_unlock();
-}
-
-static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
-					 struct lag_tracker *tracker,
-					 struct net_device *ndev,
-					 struct netdev_notifier_changeupper_info *info)
-{
-	struct net_device *upper = info->upper_dev, *ndev_tmp;
-	struct netdev_lag_upper_info *lag_upper_info;
-	bool is_bonded;
-	int bond_status = 0;
-	int num_slaves = 0;
-	int idx;
-
-	if (!netif_is_lag_master(upper))
-		return 0;
-
-	lag_upper_info = info->upper_info;
-
-	/* The event may still be of interest if the slave does not belong to
-	 * us, but is enslaved to a master which has one or more of our netdevs
-	 * as slaves (e.g., if a new slave is added to a master that bonds two
-	 * of our netdevs, we should unbond).
-	 */
-	rcu_read_lock();
-	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
-		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
-		if (idx > -1)
-			bond_status |= (1 << idx);
-
-		num_slaves++;
-	}
-	rcu_read_unlock();
-
-	/* None of this lagdev's netdevs are slaves of this master. */
-	if (!(bond_status & 0x3))
-		return 0;
-
-	if (lag_upper_info)
-		tracker->tx_type = lag_upper_info->tx_type;
-
-	/* Determine bonding status:
-	 * A device is considered bonded if both of its physical ports are
-	 * slaves of the same lag master, and no other slaves are enslaved
-	 * to it. Lag mode must be activebackup or hash.
-	 */
-	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
-		    (bond_status == 0x3) &&
-		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
-		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
-
-	if (tracker->is_bonded != is_bonded) {
-		tracker->is_bonded = is_bonded;
-		return 1;
-	}
-
-	return 0;
-}
-
-static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
-					      struct lag_tracker *tracker,
-					      struct net_device *ndev,
-					      struct netdev_notifier_changelowerstate_info *info)
-{
-	struct netdev_lag_lower_state_info *lag_lower_info;
-	int idx;
-
-	if (!netif_is_lag_port(ndev))
-		return 0;
-
-	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
-	if (idx == -1)
-		return 0;
-
-	/* This information is used to determine virtual to physical
-	 * port mapping.
-	 */
-	lag_lower_info = info->lower_state_info;
-	if (!lag_lower_info)
-		return 0;
-
-	tracker->netdev_state[idx] = *lag_lower_info;
-
-	return 1;
-}
-
-static int mlx5_lag_netdev_event(struct notifier_block *this,
-				 unsigned long event, void *ptr)
-{
-	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
-	struct lag_tracker tracker;
-	struct mlx5_lag *ldev;
-	int changed = 0;
-
-	if (!net_eq(dev_net(ndev), &init_net))
-		return NOTIFY_DONE;
-
-	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
-		return NOTIFY_DONE;
-
-	ldev    = container_of(this, struct mlx5_lag, nb);
-	tracker = ldev->tracker;
-
-	switch (event) {
-	case NETDEV_CHANGEUPPER:
-		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
-							ptr);
-		break;
-	case NETDEV_CHANGELOWERSTATE:
-		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
-							     ndev, ptr);
-		break;
-	}
-
-	mutex_lock(&lag_mutex);
-	ldev->tracker = tracker;
-	mutex_unlock(&lag_mutex);
-
-	if (changed)
-		mlx5_queue_bond_work(ldev, 0);
-
-	return NOTIFY_DONE;
-}
-
-static struct mlx5_lag *mlx5_lag_dev_alloc(void)
-{
-	struct mlx5_lag *ldev;
-
-	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
-	if (!ldev)
-		return NULL;
-
-	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
-
-	return ldev;
-}
-
-static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
-{
-	kfree(ldev);
-}
-
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
-				struct mlx5_core_dev *dev,
-				struct net_device *netdev)
-{
-	unsigned int fn = PCI_FUNC(dev->pdev->devfn);
-
-	if (fn >= MLX5_MAX_PORTS)
-		return;
-
-	mutex_lock(&lag_mutex);
-	ldev->pf[fn].dev    = dev;
-	ldev->pf[fn].netdev = netdev;
-	ldev->tracker.netdev_state[fn].link_up = 0;
-	ldev->tracker.netdev_state[fn].tx_enabled = 0;
-
-	dev->priv.lag = ldev;
-	mutex_unlock(&lag_mutex);
-}
-
-static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
-				   struct mlx5_core_dev *dev)
-{
-	int i;
-
-	for (i = 0; i < MLX5_MAX_PORTS; i++)
-		if (ldev->pf[i].dev == dev)
-			break;
-
-	if (i == MLX5_MAX_PORTS)
-		return;
-
-	mutex_lock(&lag_mutex);
-	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
-
-	dev->priv.lag = NULL;
-	mutex_unlock(&lag_mutex);
-}
-
-/* Must be called with intf_mutex held */
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
-{
-	struct mlx5_lag *ldev = NULL;
-	struct mlx5_core_dev *tmp_dev;
-
-	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
-	    !MLX5_CAP_GEN(dev, lag_master) ||
-	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
-		return;
-
-	tmp_dev = mlx5_get_next_phys_dev(dev);
-	if (tmp_dev)
-		ldev = tmp_dev->priv.lag;
-
-	if (!ldev) {
-		ldev = mlx5_lag_dev_alloc();
-		if (!ldev) {
-			mlx5_core_err(dev, "Failed to alloc lag dev\n");
-			return;
-		}
-	}
-
-	mlx5_lag_dev_add_pf(ldev, dev, netdev);
-
-	if (!ldev->nb.notifier_call) {
-		ldev->nb.notifier_call = mlx5_lag_netdev_event;
-		if (register_netdevice_notifier(&ldev->nb)) {
-			ldev->nb.notifier_call = NULL;
-			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
-		}
-	}
-}
-
-/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev)
-{
-	struct mlx5_lag *ldev;
-	int i;
-
-	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev)
-		return;
-
-	if (mlx5_lag_is_bonded(ldev))
-		mlx5_deactivate_lag(ldev);
-
-	mlx5_lag_dev_remove_pf(ldev, dev);
-
-	for (i = 0; i < MLX5_MAX_PORTS; i++)
-		if (ldev->pf[i].dev)
-			break;
-
-	if (i == MLX5_MAX_PORTS) {
-		if (ldev->nb.notifier_call)
-			unregister_netdevice_notifier(&ldev->nb);
-		cancel_delayed_work_sync(&ldev->bond_work);
-		mlx5_lag_dev_free(ldev);
-	}
-}
-
-bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
-{
-	struct mlx5_lag *ldev;
-	bool res;
-
-	mutex_lock(&lag_mutex);
-	ldev = mlx5_lag_dev_get(dev);
-	res  = ldev && mlx5_lag_is_bonded(ldev);
-	mutex_unlock(&lag_mutex);
-
-	return res;
-}
-EXPORT_SYMBOL(mlx5_lag_is_active);
-
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
-{
-	struct net_device *ndev = NULL;
-	struct mlx5_lag *ldev;
-
-	mutex_lock(&lag_mutex);
-	ldev = mlx5_lag_dev_get(dev);
-
-	if (!(ldev && mlx5_lag_is_bonded(ldev)))
-		goto unlock;
-
-	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
-		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
-		       ldev->pf[0].netdev : ldev->pf[1].netdev;
-	} else {
-		ndev = ldev->pf[0].netdev;
-	}
-	if (ndev)
-		dev_hold(ndev);
-
-unlock:
-	mutex_unlock(&lag_mutex);
-
-	return ndev;
-}
-EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
-
-bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
-{
-	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
-						 priv);
-	struct mlx5_lag *ldev;
-
-	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
-		return true;
-
-	ldev = mlx5_lag_dev_get(dev);
-	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
-		return true;
-
-	/* If bonded, we do not add an IB device for PF1. */
-	return false;
-}
-
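
mlx5_infer_tx_affinity_mapping() above reduces to two rules: in
active-backup mode both virtual ports follow whichever physical port has TX
enabled; in hash mode the identity map is used unless one link is down, in
which case traffic folds over onto the live port. The same decision as a
standalone program (the port states are invented for the test):

#include <stdbool.h>
#include <stdio.h>

struct port_state { bool link_up, tx_enabled; };

/* Mirrors the driver's logic; ports are numbered 1 and 2. */
static void infer_map(bool active_backup, const struct port_state s[2],
		      unsigned char *p1, unsigned char *p2)
{
	if (active_backup) {
		*p1 = *p2 = s[0].tx_enabled ? 1 : 2;  /* both follow active */
	} else {
		*p1 = 1;
		*p2 = 2;
		if (!s[0].link_up)
			*p1 = 2;	/* port 1 down: fold onto port 2 */
		else if (!s[1].link_up)
			*p2 = 1;	/* port 2 down: fold onto port 1 */
	}
}

int main(void)
{
	struct port_state s[2] = { { .link_up = false }, { .link_up = true } };
	unsigned char p1, p2;

	infer_map(false, s, &p1, &p2);	/* hash mode, port 1 link down */
	printf("v2p map: %u %u\n", p1, p2);	/* prints "v2p map: 2 2" */
	return 0;
}
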
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mad.c b/drivers/net/ethernet/mellanox/mlx5/core/mad.c
deleted file mode 100644
index 3a3b000..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/mad.c
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
-		      u16 opmod, u8 port)
-{
-	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
-	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
-	int err = -ENOMEM;
-	void *data;
-	void *resp;
-	u32 *out;
-	u32 *in;
-
-	in = kzalloc(inlen, GFP_KERNEL);
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!in || !out)
-		goto out;
-
-	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
-	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
-	MLX5_SET(mad_ifc_in, in, port, port);
-
-	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
-	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
-	if (err)
-		goto out;
-
-	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
-	memcpy(outb, resp,
-	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
-
-out:
-	kfree(out);
-	kfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
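
mlx5_core_mad_ifc() above allocates both mailboxes before checking either
pointer, which is safe only because kfree(NULL) is a no-op: a single out:
label can then free whatever was actually allocated. The same shape in a
userspace sketch (free(NULL) is likewise defined to do nothing):

#include <stdlib.h>

static int do_cmd(size_t inlen, size_t outlen)
{
	int err = -1;			/* -ENOMEM in the kernel version */
	void *in = calloc(1, inlen);
	void *out = calloc(1, outlen);

	if (!in || !out)
		goto out;		/* one or both may be NULL here */

	/* ... build in, execute the command, copy from out ... */
	err = 0;

out:
	free(out);			/* free(NULL) is a no-op */
	free(in);
	return err;
}

int main(void)
{
	return do_cmd(64, 64);
}
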
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
deleted file mode 100644
index a16ee16..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ /dev/null
@@ -1,1575 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/io-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cq.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/srq.h>
-#include <linux/debugfs.h>
-#include <linux/kmod.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#ifdef CONFIG_RFS_ACCEL
-#include <linux/cpu_rmap.h>
-#endif
-#include <net/devlink.h>
-#include "mlx5_core.h"
-#include "fs_core.h"
-#ifdef CONFIG_MLX5_CORE_EN
-#include "eswitch.h"
-#endif
-
-MODULE_AUTHOR("Eli Cohen <eli@...lanox.com>");
-MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRIVER_VERSION);
-
-unsigned int mlx5_core_debug_mask;
-module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
-MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
-
-#define MLX5_DEFAULT_PROF	2
-static unsigned int prof_sel = MLX5_DEFAULT_PROF;
-module_param_named(prof_sel, prof_sel, uint, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
-
-enum {
-	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
-	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
-};
-
-static struct mlx5_profile profile[] = {
-	[0] = {
-		.mask           = 0,
-	},
-	[1] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE,
-		.log_max_qp	= 12,
-	},
-	[2] = {
-		.mask		= MLX5_PROF_MASK_QP_SIZE |
-				  MLX5_PROF_MASK_MR_CACHE,
-		.log_max_qp	= 17,
-		.mr_cache[0]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[1]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[2]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[3]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[4]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[5]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[6]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[7]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[8]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[9]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[10]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[11]	= {
-			.size	= 500,
-			.limit	= 250
-		},
-		.mr_cache[12]	= {
-			.size	= 64,
-			.limit	= 32
-		},
-		.mr_cache[13]	= {
-			.size	= 32,
-			.limit	= 16
-		},
-		.mr_cache[14]	= {
-			.size	= 16,
-			.limit	= 8
-		},
-		.mr_cache[15]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[16]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[17]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[18]	= {
-			.size	= 8,
-			.limit	= 4
-		},
-		.mr_cache[19]	= {
-			.size	= 4,
-			.limit	= 2
-		},
-		.mr_cache[20]	= {
-			.size	= 4,
-			.limit	= 2
-		},
-	},
-};
-
-#define FW_INIT_TIMEOUT_MILI	2000
-#define FW_INIT_WAIT_MS		2
-
-static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
-{
-	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
-	int err = 0;
-
-	while (fw_initializing(dev)) {
-		if (time_after(jiffies, end)) {
-			err = -EBUSY;
-			break;
-		}
-		msleep(FW_INIT_WAIT_MS);
-	}
-
-	return err;
-}
-
-static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
-{
-	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
-					      driver_version);
-	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
-	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
-	int remaining_size = driver_ver_sz;
-	char *string;
-
-	if (!MLX5_CAP_GEN(dev, driver_version))
-		return;
-
-	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
-
-	strncpy(string, "Linux", remaining_size);
-
-	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-	strncat(string, ",", remaining_size);
-
-	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-	strncat(string, DRIVER_NAME, remaining_size);
-
-	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-	strncat(string, ",", remaining_size);
-
-	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
-	strncat(string, DRIVER_VERSION, remaining_size);
-
-	/* Send the command */
-	MLX5_SET(set_driver_version_in, in, opcode,
-		 MLX5_CMD_OP_SET_DRIVER_VERSION);
-
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
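
The string assembly above keeps a remaining-size budget that it re-derives
from strlen() after every append. When porting this pattern, note that
strncat(dst, src, n) copies at most n bytes and then always writes a
terminating NUL, so the budget must reserve one byte for it. A userspace
rendering (the buffer size and appended strings are placeholders):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64];		/* arbitrary budget for the demo */
	const char *parts[] = { ",", "mlx5_core", ",", "1.0" };	/* placeholders */
	int remaining;
	unsigned int i;

	strncpy(buf, "Linux", sizeof(buf));
	buf[sizeof(buf) - 1] = '\0';	/* strncpy may not terminate */

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		remaining = (int)sizeof(buf) - (int)strlen(buf) - 1;
		if (remaining <= 0)
			break;		/* budget exhausted */
		strncat(buf, parts[i], remaining);
	}
	printf("%s\n", buf);		/* Linux,mlx5_core,1.0 */
	return 0;
}
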
-
-static int set_dma_caps(struct pci_dev *pdev)
-{
-	int err;
-
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
-		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
-			return err;
-		}
-	}
-
-	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-	if (err) {
-		dev_warn(&pdev->dev,
-			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (err) {
-			dev_err(&pdev->dev,
-				"Can't set consistent PCI DMA mask, aborting\n");
-			return err;
-		}
-	}
-
-	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
-	return err;
-}
-
-static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
-{
-	struct pci_dev *pdev = dev->pdev;
-	int err = 0;
-
-	mutex_lock(&dev->pci_status_mutex);
-	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
-		err = pci_enable_device(pdev);
-		if (!err)
-			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
-	}
-	mutex_unlock(&dev->pci_status_mutex);
-
-	return err;
-}
-
-static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
-{
-	struct pci_dev *pdev = dev->pdev;
-
-	mutex_lock(&dev->pci_status_mutex);
-	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
-		pci_disable_device(pdev);
-		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
-	}
-	mutex_unlock(&dev->pci_status_mutex);
-}
-
-static int request_bar(struct pci_dev *pdev)
-{
-	int err = 0;
-
-	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
-		return -ENODEV;
-	}
-
-	err = pci_request_regions(pdev, DRIVER_NAME);
-	if (err)
-		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
-
-	return err;
-}
-
-static void release_bar(struct pci_dev *pdev)
-{
-	pci_release_regions(pdev);
-}
-
-static int mlx5_enable_msix(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-	struct mlx5_eq_table *table = &priv->eq_table;
-	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
-	int nvec;
-	int i;
-
-	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
-	       MLX5_EQ_VEC_COMP_BASE;
-	nvec = min_t(int, nvec, num_eqs);
-	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
-		return -ENOMEM;
-
-	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
-
-	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
-	if (!priv->msix_arr || !priv->irq_info)
-		goto err_free_msix;
-
-	for (i = 0; i < nvec; i++)
-		priv->msix_arr[i].entry = i;
-
-	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
-				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
-	if (nvec < 0)
-		return nvec;
-
-	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-
-	return 0;
-
-err_free_msix:
-	kfree(priv->irq_info);
-	kfree(priv->msix_arr);
-	return -ENOMEM;
-}
-
-static void mlx5_disable_msix(struct mlx5_core_dev *dev)
-{
-	struct mlx5_priv *priv = &dev->priv;
-
-	pci_disable_msix(dev->pdev);
-	kfree(priv->irq_info);
-	kfree(priv->msix_arr);
-}
-
-struct mlx5_reg_host_endianess {
-	u8	he;
-	u8      rsvd[15];
-};
-
-#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
-
-enum {
-	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
-				MLX5_DEV_CAP_FLAG_DCT,
-};
-
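-/* Convert a pkey table size in entries to the firmware encoding: log2(size / 128) */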
-static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
-{
-	switch (size) {
-	case 128:
-		return 0;
-	case 256:
-		return 1;
-	case 512:
-		return 2;
-	case 1024:
-		return 3;
-	case 2048:
-		return 4;
-	case 4096:
-		return 5;
-	default:
-		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
-		return 0;
-	}
-}
-
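-/* Query one capability type in current or maximum mode and cache the
- * result in dev->hca_caps_cur[] / dev->hca_caps_max[].
- */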
-static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
-				   enum mlx5_cap_type cap_type,
-				   enum mlx5_cap_mode cap_mode)
-{
-	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
-	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-	void *out, *hca_caps;
-	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
-	int err;
-
-	memset(in, 0, sizeof(in));
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err) {
-		mlx5_core_warn(dev,
-			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
-			       cap_type, cap_mode, err);
-		goto query_ex;
-	}
-
-	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
-
-	switch (cap_mode) {
-	case HCA_CAP_OPMOD_GET_MAX:
-		memcpy(dev->hca_caps_max[cap_type], hca_caps,
-		       MLX5_UN_SZ_BYTES(hca_cap_union));
-		break;
-	case HCA_CAP_OPMOD_GET_CUR:
-		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
-		       MLX5_UN_SZ_BYTES(hca_cap_union));
-		break;
-	default:
-		mlx5_core_warn(dev,
-			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
-			       cap_type, cap_mode);
-		err = -EINVAL;
-		break;
-	}
-query_ex:
-	kfree(out);
-	return err;
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
-{
-	int ret;
-
-	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
-	if (ret)
-		return ret;
-	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
-}
-
-static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
-{
-	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
-
-	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
-	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
-	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-}
-
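-/* If atomics are supported and the firmware advertises host endianness
- * for 8B atomic requests, select that mode.
- */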
-static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
-{
-	void *set_ctx;
-	void *set_hca_cap;
-	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
-	int req_endianness;
-	int err;
-
-	if (MLX5_CAP_GEN(dev, atomic)) {
-		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
-		if (err)
-			return err;
-	} else {
-		return 0;
-	}
-
-	req_endianness =
-		MLX5_CAP_ATOMIC(dev,
-				supported_atomic_req_8B_endianess_mode_1);
-
-	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
-		return 0;
-
-	set_ctx = kzalloc(set_sz, GFP_KERNEL);
-	if (!set_ctx)
-		return -ENOMEM;
-
-	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
-
-	/* Set requestor to host endianness */
-	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
-		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
-
-	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
-
-	kfree(set_ctx);
-	return err;
-}
-
-static int handle_hca_cap(struct mlx5_core_dev *dev)
-{
-	void *set_ctx = NULL;
-	struct mlx5_profile *prof = dev->profile;
-	int err = -ENOMEM;
-	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
-	void *set_hca_cap;
-
-	set_ctx = kzalloc(set_sz, GFP_KERNEL);
-	if (!set_ctx)
-		goto query_ex;
-
-	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
-	if (err)
-		goto query_ex;
-
-	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
-				   capability);
-	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
-	       MLX5_ST_SZ_BYTES(cmd_hca_cap));
-
-	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
-		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
-		      128);
-	/* we limit the size of the pkey table to 128 entries for now */
-	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
-		 to_fw_pkey_sz(dev, 128));
-
-	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
-		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
-			 prof->log_max_qp);
-
-	/* disable cmdif checksum */
-	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
-
-	/* If the HCA supports 4K UARs use it */
-	if (MLX5_CAP_GEN_MAX(dev, uar_4k))
-		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
-
-	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
-
-	err = set_caps(dev, set_ctx, set_sz,
-		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
-
-query_ex:
-	kfree(set_ctx);
-	return err;
-}
-
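-/* Report the host endianness to firmware via the HOST_ENDIANNESS register (PF only) */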
-static int set_hca_ctrl(struct mlx5_core_dev *dev)
-{
-	struct mlx5_reg_host_endianess he_in;
-	struct mlx5_reg_host_endianess he_out;
-	int err;
-
-	if (!mlx5_core_is_pf(dev))
-		return 0;
-
-	memset(&he_in, 0, sizeof(he_in));
-	he_in.he = MLX5_SET_HOST_ENDIANNESS;
-	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
-					&he_out, sizeof(he_out),
-					MLX5_REG_HOST_ENDIANNESS, 0, 1);
-	return err;
-}
-
-int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
-{
-	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
-
-	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
-	MLX5_SET(enable_hca_in, in, function_id, func_id);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
-{
-	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};
-
-	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
-	MLX5_SET(disable_hca_in, in, function_id, func_id);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
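-/* Read the 64-bit internal timer as two 32-bit halves; the high word is
- * read twice to detect a low-word wrap between the reads.
- */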
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
-{
-	u32 timer_h, timer_h1, timer_l;
-
-	timer_h = ioread32be(&dev->iseg->internal_timer_h);
-	timer_l = ioread32be(&dev->iseg->internal_timer_l);
-	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
-	if (timer_h != timer_h1) /* wrap around */
-		timer_l = ioread32be(&dev->iseg->internal_timer_l);
-
-	return (u64)timer_l | (u64)timer_h1 << 32;
-}
-
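-/* Hint the IRQ of completion vector i to a CPU spread over the device's NUMA node */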
-static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
-	struct mlx5_priv *priv  = &mdev->priv;
-	struct msix_entry *msix = priv->msix_arr;
-	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node           = priv->numa_node;
-	int err;
-
-	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
-		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
-		return -ENOMEM;
-	}
-
-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
-			priv->irq_info[i].mask);
-
-	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
-	if (err) {
-		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
-			       irq);
-		goto err_clear_mask;
-	}
-
-	return 0;
-
-err_clear_mask:
-	free_cpumask_var(priv->irq_info[i].mask);
-	return err;
-}
-
-static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
-{
-	struct mlx5_priv *priv  = &mdev->priv;
-	struct msix_entry *msix = priv->msix_arr;
-	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-
-	irq_set_affinity_hint(irq, NULL);
-	free_cpumask_var(priv->irq_info[i].mask);
-}
-
-static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
-{
-	int err;
-	int i;
-
-	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
-		err = mlx5_irq_set_affinity_hint(mdev, i);
-		if (err)
-			goto err_out;
-	}
-
-	return 0;
-
-err_out:
-	for (i--; i >= 0; i--)
-		mlx5_irq_clear_affinity_hint(mdev, i);
-
-	return err;
-}
-
-static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
-{
-	int i;
-
-	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
-		mlx5_irq_clear_affinity_hint(mdev, i);
-}
-
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	struct mlx5_eq *eq, *n;
-	int err = -ENOENT;
-
-	spin_lock(&table->lock);
-	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
-		if (eq->index == vector) {
-			*eqn = eq->eqn;
-			*irqn = eq->irqn;
-			err = 0;
-			break;
-		}
-	}
-	spin_unlock(&table->lock);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_vector2eqn);
-
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	struct mlx5_eq *eq;
-
-	spin_lock(&table->lock);
-	list_for_each_entry(eq, &table->comp_eqs_list, list)
-		if (eq->eqn == eqn) {
-			spin_unlock(&table->lock);
-			return eq;
-		}
-
-	spin_unlock(&table->lock);
-
-	return ERR_PTR(-ENOENT);
-}
-
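-/* Destroy all completion EQs. The table lock is dropped around
- * mlx5_destroy_unmap_eq(), which issues a sleeping firmware command.
- */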
-static void free_comp_eqs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	struct mlx5_eq *eq, *n;
-
-#ifdef CONFIG_RFS_ACCEL
-	if (dev->rmap) {
-		free_irq_cpu_rmap(dev->rmap);
-		dev->rmap = NULL;
-	}
-#endif
-	spin_lock(&table->lock);
-	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
-		list_del(&eq->list);
-		spin_unlock(&table->lock);
-		if (mlx5_destroy_unmap_eq(dev, eq))
-			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
-				       eq->eqn);
-		kfree(eq);
-		spin_lock(&table->lock);
-	}
-	spin_unlock(&table->lock);
-}
-
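-/* Create one completion EQ per completion vector; with RFS acceleration,
- * each vector's IRQ is also added to the CPU rmap.
- */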
-static int alloc_comp_eqs(struct mlx5_core_dev *dev)
-{
-	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	char name[MLX5_MAX_IRQ_NAME];
-	struct mlx5_eq *eq;
-	int ncomp_vec;
-	int nent;
-	int err;
-	int i;
-
-	INIT_LIST_HEAD(&table->comp_eqs_list);
-	ncomp_vec = table->num_comp_vectors;
-	nent = MLX5_COMP_EQ_SIZE;
-#ifdef CONFIG_RFS_ACCEL
-	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
-	if (!dev->rmap)
-		return -ENOMEM;
-#endif
-	for (i = 0; i < ncomp_vec; i++) {
-		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
-		if (!eq) {
-			err = -ENOMEM;
-			goto clean;
-		}
-
-#ifdef CONFIG_RFS_ACCEL
-		irq_cpu_rmap_add(dev->rmap,
-				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
-#endif
-		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
-		err = mlx5_create_map_eq(dev, eq,
-					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 name, MLX5_EQ_TYPE_COMP);
-		if (err) {
-			kfree(eq);
-			goto clean;
-		}
-		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
-		eq->index = i;
-		spin_lock(&table->lock);
-		list_add_tail(&eq->list, &table->comp_eqs_list);
-		spin_unlock(&table->lock);
-	}
-
-	return 0;
-
-clean:
-	free_comp_eqs(dev);
-	return err;
-}
-
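-/* Negotiate the Issue Supported Interface: move to ISSI 1 when the
- * firmware advertises it, otherwise stay on ISSI 0 (also when the
- * query itself is unsupported).
- */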
-static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
-{
-	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
-	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
-	u32 sup_issi;
-	int err;
-
-	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
-	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
-			    query_out, sizeof(query_out));
-	if (err) {
-		u32 syndrome;
-		u8 status;
-
-		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
-		if (!status || syndrome == MLX5_DRIVER_SYND) {
-			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
-				      err, status, syndrome);
-			return err;
-		}
-
-		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
-		dev->issi = 0;
-		return 0;
-	}
-
-	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
-
-	if (sup_issi & (1 << 1)) {
-		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
-		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
-
-		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
-		MLX5_SET(set_issi_in, set_in, current_issi, 1);
-		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
-				    set_out, sizeof(set_out));
-		if (err) {
-			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
-				      err);
-			return err;
-		}
-
-		dev->issi = 1;
-
-		return 0;
-	} else if (sup_issi & (1 << 0) || !sup_issi) {
-		return 0;
-	}
-
-	return -ENOTSUPP;
-}
-
-static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
-{
-	struct pci_dev *pdev = dev->pdev;
-	int err = 0;
-
-	pci_set_drvdata(dev->pdev, dev);
-	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
-	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
-
-	mutex_init(&priv->pgdir_mutex);
-	INIT_LIST_HEAD(&priv->pgdir_list);
-	spin_lock_init(&priv->mkey_lock);
-
-	mutex_init(&priv->alloc_mutex);
-
-	priv->numa_node = dev_to_node(&dev->pdev->dev);
-
-	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-	if (!priv->dbg_root)
-		return -ENOMEM;
-
-	err = mlx5_pci_enable_device(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
-		goto err_dbg;
-	}
-
-	err = request_bar(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
-		goto err_disable;
-	}
-
-	pci_set_master(pdev);
-
-	err = set_dma_caps(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
-		goto err_clr_master;
-	}
-
-	dev->iseg_base = pci_resource_start(dev->pdev, 0);
-	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
-	if (!dev->iseg) {
-		err = -ENOMEM;
-		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
-		goto err_clr_master;
-	}
-
-	return 0;
-
-err_clr_master:
-	pci_clear_master(dev->pdev);
-	release_bar(dev->pdev);
-err_disable:
-	mlx5_pci_disable_device(dev);
-
-err_dbg:
-	debugfs_remove(priv->dbg_root);
-	return err;
-}
-
-static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
-{
-	iounmap(dev->iseg);
-	pci_clear_master(dev->pdev);
-	release_bar(dev->pdev);
-	mlx5_pci_disable_device(dev);
-	debugfs_remove(priv->dbg_root);
-}
-
-static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
-{
-	struct pci_dev *pdev = dev->pdev;
-	int err;
-
-	err = mlx5_query_board_id(dev);
-	if (err) {
-		dev_err(&pdev->dev, "query board id failed\n");
-		goto out;
-	}
-
-	err = mlx5_eq_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "failed to initialize eq\n");
-		goto out;
-	}
-
-	err = mlx5_init_cq_table(dev);
-	if (err) {
-		dev_err(&pdev->dev, "failed to initialize cq table\n");
-		goto err_eq_cleanup;
-	}
-
-	mlx5_init_qp_table(dev);
-
-	mlx5_init_srq_table(dev);
-
-	mlx5_init_mkey_table(dev);
-
-	err = mlx5_init_rl_table(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to init rate limiting\n");
-		goto err_tables_cleanup;
-	}
-
-#ifdef CONFIG_MLX5_CORE_EN
-	err = mlx5_eswitch_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
-		goto err_rl_cleanup;
-	}
-#endif
-
-	err = mlx5_sriov_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
-		goto err_eswitch_cleanup;
-	}
-
-	return 0;
-
-err_eswitch_cleanup:
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_cleanup(dev->priv.eswitch);
-
-err_rl_cleanup:
-#endif
-	mlx5_cleanup_rl_table(dev);
-
-err_tables_cleanup:
-	mlx5_cleanup_mkey_table(dev);
-	mlx5_cleanup_srq_table(dev);
-	mlx5_cleanup_qp_table(dev);
-	mlx5_cleanup_cq_table(dev);
-
-err_eq_cleanup:
-	mlx5_eq_cleanup(dev);
-
-out:
-	return err;
-}
-
-static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
-{
-	mlx5_sriov_cleanup(dev);
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_cleanup(dev->priv.eswitch);
-#endif
-	mlx5_cleanup_rl_table(dev);
-	mlx5_cleanup_mkey_table(dev);
-	mlx5_cleanup_srq_table(dev);
-	mlx5_cleanup_qp_table(dev);
-	mlx5_cleanup_cq_table(dev);
-	mlx5_eq_cleanup(dev);
-}
-
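-/* Bring the device up: command interface, boot/init pages, capabilities,
- * EQs, IRQs and flow steering. When @boot is set, also create the
- * software objects that survive reloads.
- */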
-static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-			 bool boot)
-{
-	struct pci_dev *pdev = dev->pdev;
-	int err;
-
-	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
-		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
-			 __func__);
-		goto out;
-	}
-
-	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
-		 fw_rev_min(dev), fw_rev_sub(dev));
-
-	/* On load, clear any previous indication of internal error;
-	 * the device is coming up.
-	 */
-	dev->state = MLX5_DEVICE_STATE_UP;
-
-	err = mlx5_cmd_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
-		goto out_err;
-	}
-
-	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
-	if (err) {
-		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
-			FW_INIT_TIMEOUT_MILI);
-		goto out_err;
-	}
-
-	err = mlx5_core_enable_hca(dev, 0);
-	if (err) {
-		dev_err(&pdev->dev, "enable hca failed\n");
-		goto err_cmd_cleanup;
-	}
-
-	err = mlx5_core_set_issi(dev);
-	if (err) {
-		dev_err(&pdev->dev, "failed to set issi\n");
-		goto err_disable_hca;
-	}
-
-	err = mlx5_satisfy_startup_pages(dev, 1);
-	if (err) {
-		dev_err(&pdev->dev, "failed to allocate boot pages\n");
-		goto err_disable_hca;
-	}
-
-	err = set_hca_ctrl(dev);
-	if (err) {
-		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = handle_hca_cap_atomic(dev);
-	if (err) {
-		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = mlx5_satisfy_startup_pages(dev, 0);
-	if (err) {
-		dev_err(&pdev->dev, "failed to allocate init pages\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = mlx5_pagealloc_start(dev);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
-		goto reclaim_boot_pages;
-	}
-
-	err = mlx5_cmd_init_hca(dev);
-	if (err) {
-		dev_err(&pdev->dev, "init hca failed\n");
-		goto err_pagealloc_stop;
-	}
-
-	mlx5_set_driver_version(dev);
-
-	mlx5_start_health_poll(dev);
-
-	err = mlx5_query_hca_caps(dev);
-	if (err) {
-		dev_err(&pdev->dev, "query hca failed\n");
-		goto err_stop_poll;
-	}
-
-	if (boot) {
-		err = mlx5_init_once(dev, priv);
-		if (err) {
-			dev_err(&pdev->dev, "sw objs init failed\n");
-			goto err_stop_poll;
-		}
-	}
-
-	err = mlx5_enable_msix(dev);
-	if (err) {
-		dev_err(&pdev->dev, "enable msix failed\n");
-		goto err_cleanup_once;
-	}
-
-	dev->priv.uar = mlx5_get_uars_page(dev);
-	if (!dev->priv.uar) {
-		err = -ENOMEM;
-		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
-		goto err_disable_msix;
-	}
-
-	err = mlx5_start_eqs(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
-		goto err_put_uars;
-	}
-
-	err = alloc_comp_eqs(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
-		goto err_stop_eqs;
-	}
-
-	err = mlx5_irq_set_affinity_hints(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
-		goto err_affinity_hints;
-	}
-
-	err = mlx5_init_fs(dev);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to init flow steering\n");
-		goto err_fs;
-	}
-
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_attach(dev->priv.eswitch);
-#endif
-
-	err = mlx5_sriov_attach(dev);
-	if (err) {
-		dev_err(&pdev->dev, "sriov init failed %d\n", err);
-		goto err_sriov;
-	}
-
-	if (mlx5_device_registered(dev)) {
-		mlx5_attach_device(dev);
-	} else {
-		err = mlx5_register_device(dev);
-		if (err) {
-			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
-			goto err_reg_dev;
-		}
-	}
-
-	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
-	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-out:
-	mutex_unlock(&dev->intf_state_mutex);
-
-	return 0;
-
-err_reg_dev:
-	mlx5_sriov_detach(dev);
-
-err_sriov:
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_detach(dev->priv.eswitch);
-#endif
-	mlx5_cleanup_fs(dev);
-
-err_fs:
-	mlx5_irq_clear_affinity_hints(dev);
-
-err_affinity_hints:
-	free_comp_eqs(dev);
-
-err_stop_eqs:
-	mlx5_stop_eqs(dev);
-
-err_put_uars:
-	mlx5_put_uars_page(dev, priv->uar);
-
-err_disable_msix:
-	mlx5_disable_msix(dev);
-
-err_cleanup_once:
-	if (boot)
-		mlx5_cleanup_once(dev);
-
-err_stop_poll:
-	mlx5_stop_health_poll(dev);
-	if (mlx5_cmd_teardown_hca(dev)) {
-		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		goto out_err;
-	}
-
-err_pagealloc_stop:
-	mlx5_pagealloc_stop(dev);
-
-reclaim_boot_pages:
-	mlx5_reclaim_startup_pages(dev);
-
-err_disable_hca:
-	mlx5_core_disable_hca(dev, 0);
-
-err_cmd_cleanup:
-	mlx5_cmd_cleanup(dev);
-
-out_err:
-	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
-	mutex_unlock(&dev->intf_state_mutex);
-
-	return err;
-}
-
-static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
-			   bool cleanup)
-{
-	int err = 0;
-
-	mutex_lock(&dev->intf_state_mutex);
-	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
-		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
-			 __func__);
-		if (cleanup)
-			mlx5_cleanup_once(dev);
-		goto out;
-	}
-
-	if (mlx5_device_registered(dev))
-		mlx5_detach_device(dev);
-
-	mlx5_sriov_detach(dev);
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_detach(dev->priv.eswitch);
-#endif
-	mlx5_cleanup_fs(dev);
-	mlx5_irq_clear_affinity_hints(dev);
-	free_comp_eqs(dev);
-	mlx5_stop_eqs(dev);
-	mlx5_put_uars_page(dev, priv->uar);
-	mlx5_disable_msix(dev);
-	if (cleanup)
-		mlx5_cleanup_once(dev);
-	mlx5_stop_health_poll(dev);
-	err = mlx5_cmd_teardown_hca(dev);
-	if (err) {
-		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
-		goto out;
-	}
-	mlx5_pagealloc_stop(dev);
-	mlx5_reclaim_startup_pages(dev);
-	mlx5_core_disable_hca(dev, 0);
-	mlx5_cmd_cleanup(dev);
-
-out:
-	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
-	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
-	mutex_unlock(&dev->intf_state_mutex);
-	return err;
-}
-
-struct mlx5_core_event_handler {
-	void (*event)(struct mlx5_core_dev *dev,
-		      enum mlx5_dev_event event,
-		      void *data);
-};
-
-static const struct devlink_ops mlx5_devlink_ops = {
-#ifdef CONFIG_MLX5_CORE_EN
-	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
-	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
-	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
-	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
-#endif
-};
-
-#define MLX5_IB_MOD "mlx5_ib"
-static int init_one(struct pci_dev *pdev,
-		    const struct pci_device_id *id)
-{
-	struct mlx5_core_dev *dev;
-	struct devlink *devlink;
-	struct mlx5_priv *priv;
-	int err;
-
-	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
-	if (!devlink) {
-		dev_err(&pdev->dev, "kzalloc failed\n");
-		return -ENOMEM;
-	}
-
-	dev = devlink_priv(devlink);
-	priv = &dev->priv;
-	priv->pci_dev_data = id->driver_data;
-
-	pci_set_drvdata(pdev, dev);
-
-	dev->pdev = pdev;
-	dev->event = mlx5_core_event;
-	dev->profile = &profile[prof_sel];
-
-	INIT_LIST_HEAD(&priv->ctx_list);
-	spin_lock_init(&priv->ctx_lock);
-	mutex_init(&dev->pci_status_mutex);
-	mutex_init(&dev->intf_state_mutex);
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	err = init_srcu_struct(&priv->pfault_srcu);
-	if (err) {
-		dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
-			err);
-		goto clean_dev;
-	}
-#endif
-	mutex_init(&priv->bfregs.reg_head.lock);
-	mutex_init(&priv->bfregs.wc_head.lock);
-	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
-	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
-
-	err = mlx5_pci_init(dev, priv);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
-		goto clean_srcu;
-	}
-
-	err = mlx5_health_init(dev);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
-		goto close_pci;
-	}
-
-	mlx5_pagealloc_init(dev);
-
-	err = mlx5_load_one(dev, priv, true);
-	if (err) {
-		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
-		goto clean_health;
-	}
-
-	err = request_module_nowait(MLX5_IB_MOD);
-	if (err)
-		pr_info("failed to request module %s\n", MLX5_IB_MOD);
-
-	err = devlink_register(devlink, &pdev->dev);
-	if (err)
-		goto clean_load;
-
-	return 0;
-
-clean_load:
-	mlx5_unload_one(dev, priv, true);
-clean_health:
-	mlx5_pagealloc_cleanup(dev);
-	mlx5_health_cleanup(dev);
-close_pci:
-	mlx5_pci_close(dev, priv);
-clean_srcu:
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	cleanup_srcu_struct(&priv->pfault_srcu);
-clean_dev:
-#endif
-	pci_set_drvdata(pdev, NULL);
-	devlink_free(devlink);
-
-	return err;
-}
-
-static void remove_one(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	struct devlink *devlink = priv_to_devlink(dev);
-	struct mlx5_priv *priv = &dev->priv;
-
-	devlink_unregister(devlink);
-	mlx5_unregister_device(dev);
-
-	if (mlx5_unload_one(dev, priv, true)) {
-		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
-		mlx5_health_cleanup(dev);
-		return;
-	}
-
-	mlx5_pagealloc_cleanup(dev);
-	mlx5_health_cleanup(dev);
-	mlx5_pci_close(dev, priv);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	cleanup_srcu_struct(&priv->pfault_srcu);
-#endif
-	pci_set_drvdata(pdev, NULL);
-	devlink_free(devlink);
-}
-
-static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
-					      pci_channel_state_t state)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
-
-	dev_info(&pdev->dev, "%s was called\n", __func__);
-
-	mlx5_enter_error_state(dev);
-	mlx5_unload_one(dev, priv, false);
-	/* When called by the PCI error core (state != 0), save the PCI
-	 * state and drain the health workqueue before disabling the device.
-	 */
-	if (state) {
-		pci_save_state(pdev);
-		mlx5_drain_health_wq(dev);
-		mlx5_pci_disable_device(dev);
-	}
-
-	return state == pci_channel_io_perm_failure ?
-		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
-}
-
-/* Wait for the device to show vital signs: poll the health counter
- * until it starts advancing.
- */
-static int wait_vital(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_core_health *health = &dev->priv.health;
-	const int niter = 100;
-	u32 last_count = 0;
-	u32 count;
-	int i;
-
-	for (i = 0; i < niter; i++) {
-		count = ioread32be(health->health_counter);
-		if (count && count != 0xffffffff) {
-			if (last_count && last_count != count) {
-				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
-				return 0;
-			}
-			last_count = count;
-		}
-		msleep(50);
-	}
-
-	return -ETIMEDOUT;
-}
-
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	int err;
-
-	dev_info(&pdev->dev, "%s was called\n", __func__);
-
-	err = mlx5_pci_enable_device(dev);
-	if (err) {
-		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
-			, __func__, err);
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	pci_set_master(pdev);
-	pci_restore_state(pdev);
-
-	if (wait_vital(pdev)) {
-		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void mlx5_pci_resume(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
-	int err;
-
-	dev_info(&pdev->dev, "%s was called\n", __func__);
-
-	err = mlx5_load_one(dev, priv, false);
-	if (err)
-		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
-			, __func__, err);
-	else
-		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
-}
-
-static const struct pci_error_handlers mlx5_err_handler = {
-	.error_detected = mlx5_pci_err_detected,
-	.slot_reset	= mlx5_pci_slot_reset,
-	.resume		= mlx5_pci_resume
-};
-
-static void shutdown(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	struct mlx5_priv *priv = &dev->priv;
-
-	dev_info(&pdev->dev, "Shutdown was called\n");
-	/* Notify mlx5 clients that the kernel is being shut down */
-	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
-	mlx5_unload_one(dev, priv, false);
-	mlx5_pci_disable_device(dev);
-}
-
-static const struct pci_device_id mlx5_core_pci_table[] = {
-	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
-	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
-	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
-	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
-	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
-	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
-	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
-	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
-	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
-	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5, PCIe 4.0 VF */
-	{ 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
-	mlx5_pci_err_detected(dev->pdev, 0);
-}
-
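-/* Counterpart of mlx5_disable_device(): re-enable the device through the
- * slot-reset and resume error-handler steps.
- */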
-void mlx5_recover_device(struct mlx5_core_dev *dev)
-{
-	mlx5_pci_disable_device(dev);
-	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
-		mlx5_pci_resume(dev->pdev);
-}
-
-static struct pci_driver mlx5_core_driver = {
-	.name           = DRIVER_NAME,
-	.id_table       = mlx5_core_pci_table,
-	.probe          = init_one,
-	.remove         = remove_one,
-	.shutdown	= shutdown,
-	.err_handler	= &mlx5_err_handler,
-	.sriov_configure   = mlx5_core_sriov_configure,
-};
-
-static void mlx5_core_verify_params(void)
-{
-	if (prof_sel >= ARRAY_SIZE(profile)) {
-		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
-			prof_sel,
-			ARRAY_SIZE(profile) - 1,
-			MLX5_DEFAULT_PROF);
-		prof_sel = MLX5_DEFAULT_PROF;
-	}
-}
-
-static int __init init(void)
-{
-	int err;
-
-	mlx5_core_verify_params();
-	mlx5_register_debugfs();
-
-	err = pci_register_driver(&mlx5_core_driver);
-	if (err)
-		goto err_debug;
-
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5e_init();
-#endif
-
-	return 0;
-
-err_debug:
-	mlx5_unregister_debugfs();
-	return err;
-}
-
-static void __exit cleanup(void)
-{
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5e_cleanup();
-#endif
-	pci_unregister_driver(&mlx5_core_driver);
-	mlx5_unregister_debugfs();
-}
-
-module_init(init);
-module_exit(cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
deleted file mode 100644
index ba2b09c..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include <rdma/ib_verbs.h>
-#include "mlx5_core.h"
-
-int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
-{
-	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)]   = {0};
-	void *gid;
-
-	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
-	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
-	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
-	memcpy(gid, mgid, sizeof(*mgid));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_attach_mcg);
-
-int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
-{
-	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)]   = {0};
-	void *gid;
-
-	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
-	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
-	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
-	memcpy(gid, mgid, sizeof(*mgid));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
deleted file mode 100644
index 74241e8..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_CORE_H__
-#define __MLX5_CORE_H__
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/if_link.h>
-
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0-1"
-#define DRIVER_RELDATE  "January 2015"
-
-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
-
-extern uint mlx5_core_debug_mask;
-
-#define mlx5_core_dbg(__dev, format, ...)				\
-	dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,		\
-		 __func__, __LINE__, current->pid,			\
-		 ##__VA_ARGS__)
-
-#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
-do {									\
-	if ((mask) & mlx5_core_debug_mask)				\
-		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
-} while (0)
-
-#define mlx5_core_err(__dev, format, ...)				\
-	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
-		__func__, __LINE__, current->pid,			\
-		##__VA_ARGS__)
-
-#define mlx5_core_warn(__dev, format, ...)				\
-	dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
-		 __func__, __LINE__, current->pid,			\
-		##__VA_ARGS__)
-
-#define mlx5_core_info(__dev, format, ...)				\
-	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
-
-enum {
-	MLX5_CMD_DATA, /* print command payload only */
-	MLX5_CMD_TIME, /* print command execution time */
-};
-
-enum {
-	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
-	MLX5_DRIVER_SYND = 0xbadd00de,
-};
-
-int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
-int mlx5_query_board_id(struct mlx5_core_dev *dev);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
-int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
-void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-		     unsigned long param);
-void mlx5_core_page_fault(struct mlx5_core_dev *dev,
-			  struct mlx5_pagefault *pfault);
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
-void mlx5_enter_error_state(struct mlx5_core_dev *dev);
-void mlx5_disable_device(struct mlx5_core_dev *dev);
-void mlx5_recover_device(struct mlx5_core_dev *dev);
-int mlx5_sriov_init(struct mlx5_core_dev *dev);
-void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
-int mlx5_sriov_attach(struct mlx5_core_dev *dev);
-void mlx5_sriov_detach(struct mlx5_core_dev *dev);
-int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
-int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
-int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
-int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-				       void *context, u32 *element_id);
-int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-				       void *context, u32 element_id,
-				       u32 modify_bitmask);
-int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-					u32 element_id);
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
-u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
-u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
-struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
-void mlx5_cq_tasklet_cb(unsigned long data);
-
-void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev);
-
-void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
-void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
-void mlx5_attach_device(struct mlx5_core_dev *dev);
-void mlx5_detach_device(struct mlx5_core_dev *dev);
-bool mlx5_device_registered(struct mlx5_core_dev *dev);
-int mlx5_register_device(struct mlx5_core_dev *dev);
-void mlx5_unregister_device(struct mlx5_core_dev *dev);
-void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
-void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
-void mlx5_dev_list_lock(void);
-void mlx5_dev_list_unlock(void);
-int mlx5_dev_list_trylock(void);
-int mlx5_encap_alloc(struct mlx5_core_dev *dev,
-		     int header_type,
-		     size_t size,
-		     void *encap_header,
-		     u32 *encap_id);
-void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
-
-bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
-
-void mlx5e_init(void);
-void mlx5e_cleanup(void);
-
-static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
-{
-	/* LACP owner conditions:
-	 * 1) Function is physical.
-	 * 2) LAG is supported by FW.
-	 * 3) LAG is managed by driver (currently the only option).
-	 */
-	return MLX5_CAP_GEN(dev, vport_group_manager) &&
-	       (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
-	       MLX5_CAP_GEN(dev, lag_master);
-}
-
-#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
deleted file mode 100644
index b9736f5..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-
-	memset(table, 0, sizeof(*table));
-	rwlock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-}
-
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
-{
-}
-
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
-			     struct mlx5_core_mkey *mkey,
-			     u32 *in, int inlen,
-			     u32 *out, int outlen,
-			     mlx5_cmd_cbk_t callback, void *context)
-{
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
-	u32 mkey_index;
-	void *mkc;
-	int err;
-	u8 key;
-
-	spin_lock_irq(&dev->priv.mkey_lock);
-	key = dev->priv.mkey_key++;
-	spin_unlock_irq(&dev->priv.mkey_lock);
-	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
-	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
-	MLX5_SET(mkc, mkc, mkey_7_0, key);
-
-	if (callback)
-		return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
-					callback, context);
-
-	err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
-	if (err)
-		return err;
-
-	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
-	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
-	mkey->size = MLX5_GET64(mkc, mkc, len);
-	mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
-	mkey->pd = MLX5_GET(mkc, mkc, pd);
-
-	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
-		      mkey_index, key, mkey->key);
-
-	/* connect to mkey tree */
-	write_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
-	write_unlock_irq(&table->lock);
-	if (err) {
-		mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
-			       mlx5_base_mkey(mkey->key), err);
-		mlx5_core_destroy_mkey(dev, mkey);
-	}
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
-
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
-			  struct mlx5_core_mkey *mkey,
-			  u32 *in, int inlen)
-{
-	return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
-					NULL, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(mlx5_core_create_mkey);
-
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
-			   struct mlx5_core_mkey *mkey)
-{
-	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
-	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)]   = {0};
-	struct mlx5_core_mkey *deleted_mkey;
-	unsigned long flags;
-
-	write_lock_irqsave(&table->lock, flags);
-	deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
-	write_unlock_irqrestore(&table->lock, flags);
-	if (!deleted_mkey) {
-		mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
-			       mlx5_base_mkey(mkey->key));
-		return -ENOENT;
-	}
-
-	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
-	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_mkey);
-
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
-			 u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
-
-	memset(out, 0, outlen);
-	MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
-	MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_query_mkey);
-
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
-			     u32 *mkey)
-{
-	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)]   = {0};
-	int err;
-
-	MLX5_SET(query_special_contexts_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*mkey = MLX5_GET(query_special_contexts_out, out,
-				 dump_fill_mkey);
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
-
-static inline u32 mlx5_get_psv(u32 *out, int psv_index)
-{
-	switch (psv_index) {
-	case 1: return MLX5_GET(create_psv_out, out, psv1_index);
-	case 2: return MLX5_GET(create_psv_out, out, psv2_index);
-	case 3: return MLX5_GET(create_psv_out, out, psv3_index);
-	default: return MLX5_GET(create_psv_out, out, psv0_index);
-	}
-}
-
-int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
-			 int npsvs, u32 *sig_index)
-{
-	u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(create_psv_in)]   = {0};
-	int i, err;
-
-	if (npsvs > MLX5_MAX_PSVS)
-		return -EINVAL;
-
-	MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
-	MLX5_SET(create_psv_in, in, pd, pdn);
-	MLX5_SET(create_psv_in, in, num_psv, npsvs);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	for (i = 0; i < npsvs; i++)
-		sig_index[i] = mlx5_get_psv(out, i);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_psv);
-
-int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
-{
-	u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_psv_in)]   = {0};
-
-	MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
-	MLX5_SET(destroy_psv_in, in, psvn, psv_num);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
deleted file mode 100644
index a57d5a8..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ /dev/null
@@ -1,580 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/highmem.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-enum {
-	MLX5_PAGES_CANT_GIVE	= 0,
-	MLX5_PAGES_GIVE		= 1,
-	MLX5_PAGES_TAKE		= 2
-};
-
-struct mlx5_pages_req {
-	struct mlx5_core_dev *dev;
-	u16	func_id;
-	s32	npages;
-	struct work_struct work;
-};
-
-struct fw_page {
-	struct rb_node		rb_node;
-	u64			addr;
-	struct page	       *page;
-	u16			func_id;
-	unsigned long		bitmask;
-	struct list_head	list;
-	unsigned		free_count;
-};
-
-enum {
-	MAX_RECLAIM_TIME_MSECS	= 5000,
-	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
-};
-
-enum {
-	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
-	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
-};
-
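-/* Track a firmware page in the rb-tree keyed by DMA address; all
- * MLX5_NUM_4K_IN_PAGE 4K chunks start out free.
- */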
-static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
-{
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node **new = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct fw_page *nfp;
-	struct fw_page *tfp;
-	int i;
-
-	while (*new) {
-		parent = *new;
-		tfp = rb_entry(parent, struct fw_page, rb_node);
-		if (tfp->addr < addr)
-			new = &parent->rb_left;
-		else if (tfp->addr > addr)
-			new = &parent->rb_right;
-		else
-			return -EEXIST;
-	}
-
-	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
-	if (!nfp)
-		return -ENOMEM;
-
-	nfp->addr = addr;
-	nfp->page = page;
-	nfp->func_id = func_id;
-	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
-	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
-		set_bit(i, &nfp->bitmask);
-
-	rb_link_node(&nfp->rb_node, parent, new);
-	rb_insert_color(&nfp->rb_node, root);
-	list_add(&nfp->list, &dev->priv.free_list);
-
-	return 0;
-}
-
-static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
-{
-	struct rb_root *root = &dev->priv.page_root;
-	struct rb_node *tmp = root->rb_node;
-	struct fw_page *result = NULL;
-	struct fw_page *tfp;
-
-	while (tmp) {
-		tfp = rb_entry(tmp, struct fw_page, rb_node);
-		if (tfp->addr < addr) {
-			tmp = tmp->rb_left;
-		} else if (tfp->addr > addr) {
-			tmp = tmp->rb_right;
-		} else {
-			result = tfp;
-			break;
-		}
-	}
-
-	return result;
-}
-
-static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
-				s32 *npages, int boot)
-{
-	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
-	int err;
-
-	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
-	MLX5_SET(query_pages_in, in, op_mod, boot ?
-		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
-		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	*npages = MLX5_GET(query_pages_out, out, num_pages);
-	*func_id = MLX5_GET(query_pages_out, out, function_id);
-
-	return err;
-}
-
-static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
-{
-	struct fw_page *fp;
-	unsigned n;
-
-	if (list_empty(&dev->priv.free_list))
-		return -ENOMEM;
-
-	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
-	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
-	if (n >= MLX5_NUM_4K_IN_PAGE) {
-		mlx5_core_warn(dev, "alloc 4k bug\n");
-		return -ENOENT;
-	}
-	clear_bit(n, &fp->bitmask);
-	fp->free_count--;
-	if (!fp->free_count)
-		list_del(&fp->list);
-
-	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
-
-	return 0;
-}
-
-#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
-
-static void free_4k(struct mlx5_core_dev *dev, u64 addr)
-{
-	struct fw_page *fwp;
-	int n;
-
-	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
-	if (!fwp) {
-		mlx5_core_warn(dev, "page not found\n");
-		return;
-	}
-
-	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
-	fwp->free_count++;
-	set_bit(n, &fwp->bitmask);
-	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
-		rb_erase(&fwp->rb_node, &dev->priv.page_root);
-		if (fwp->free_count != 1)
-			list_del(&fwp->list);
-		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
-			       PAGE_SIZE, DMA_BIDIRECTIONAL);
-		__free_page(fwp->page);
-		kfree(fwp);
-	} else if (fwp->free_count == 1) {
-		list_add(&fwp->list, &dev->priv.free_list);
-	}
-}
-
-static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
-{
-	struct page *page;
-	u64 zero_addr = 1;
-	u64 addr;
-	int err;
-	int nid = dev_to_node(&dev->pdev->dev);
-
-	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
-	if (!page) {
-		mlx5_core_warn(dev, "failed to allocate page\n");
-		return -ENOMEM;
-	}
-map:
-	addr = dma_map_page(&dev->pdev->dev, page, 0,
-			    PAGE_SIZE, DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(&dev->pdev->dev, addr)) {
-		mlx5_core_warn(dev, "failed dma mapping page\n");
-		err = -ENOMEM;
-		goto err_mapping;
-	}
-
-	/* Firmware doesn't support page with physical address 0 */
-	if (addr == 0) {
-		zero_addr = addr;
-		goto map;
-	}
-
-	err = insert_page(dev, addr, page, func_id);
-	if (err) {
-		mlx5_core_err(dev, "failed to track allocated page\n");
-		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-	}
-
-err_mapping:
-	if (err)
-		__free_page(page);
-
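-	/* If we remapped to avoid a zero DMA address, release the first mapping */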
-	if (zero_addr == 0)
-		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
-			       DMA_BIDIRECTIONAL);
-
-	return err;
-}
-
-static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
-{
-	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)]   = {0};
-	int err;
-
-	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
-	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
-	MLX5_SET(manage_pages_in, in, function_id, func_id);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
-			       func_id, err);
-}
-
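-/* Allocate npages 4K chunks (from cached pages or fresh system pages) and
- * post them to firmware with MANAGE_PAGES(GIVE); on failure, optionally
- * notify firmware that pages cannot be supplied.
- */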
-static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
-		      int notify_fail)
-{
-	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
-	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
-	u64 addr;
-	int err;
-	u32 *in;
-	int i;
-
-	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		err = -ENOMEM;
-		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
-		goto out_free;
-	}
-
-	for (i = 0; i < npages; i++) {
-retry:
-		err = alloc_4k(dev, &addr);
-		if (err) {
-			if (err == -ENOMEM)
-				err = alloc_system_page(dev, func_id);
-			if (err)
-				goto out_4k;
-
-			goto retry;
-		}
-		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
-	}
-
-	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
-	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
-	MLX5_SET(manage_pages_in, in, function_id, func_id);
-	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (err) {
-		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
-			       func_id, npages, err);
-		goto out_4k;
-	}
-
-	dev->priv.fw_pages += npages;
-	if (func_id)
-		dev->priv.vfs_pages += npages;
-
-	mlx5_core_dbg(dev, "err %d\n", err);
-
-	kvfree(in);
-	return 0;
-
-out_4k:
-	for (i--; i >= 0; i--)
-		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
-out_free:
-	kvfree(in);
-	if (notify_fail)
-		page_notify_fail(dev, func_id);
-	return err;
-}
-
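-/* Execute MANAGE_PAGES(TAKE). If the device is in internal error the
- * firmware cannot reply, so emulate the reclaim by collecting this
- * function's pages from the rb-tree.
- */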
-static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
-			     u32 *in, int in_size, u32 *out, int out_size)
-{
-	struct fw_page *fwp;
-	struct rb_node *p;
-	u32 func_id;
-	u32 npages;
-	u32 i = 0;
-
-	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
-		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
-
-	/* No hard feelings, we want our pages back! */
-	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
-	func_id = MLX5_GET(manage_pages_in, in, function_id);
-
-	p = rb_first(&dev->priv.page_root);
-	while (p && i < npages) {
-		fwp = rb_entry(p, struct fw_page, rb_node);
-		p = rb_next(p);
-		if (fwp->func_id != func_id)
-			continue;
-
-		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
-		i++;
-	}
-
-	MLX5_SET(manage_pages_out, out, output_num_entries, i);
-	return 0;
-}
-
-static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
-			 int *nclaimed)
-{
-	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
-	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
-	int num_claimed;
-	u32 *out;
-	int err;
-	int i;
-
-	if (nclaimed)
-		*nclaimed = 0;
-
-	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
-	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
-	MLX5_SET(manage_pages_in, in, function_id, func_id);
-	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
-
-	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
-	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
-	if (err) {
-		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
-		goto out_free;
-	}
-
-	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
-	if (num_claimed > npages) {
-		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
-			       num_claimed, npages);
-		err = -EINVAL;
-		goto out_free;
-	}
-
-	for (i = 0; i < num_claimed; i++)
-		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
-
-	if (nclaimed)
-		*nclaimed = num_claimed;
-
-	dev->priv.fw_pages -= num_claimed;
-	if (func_id)
-		dev->priv.vfs_pages -= num_claimed;
-
-out_free:
-	kvfree(out);
-	return err;
-}
-
-static void pages_work_handler(struct work_struct *work)
-{
-	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
-	struct mlx5_core_dev *dev = req->dev;
-	int err = 0;
-
-	if (req->npages < 0)
-		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
-	else if (req->npages > 0)
-		err = give_pages(dev, req->func_id, req->npages, 1);
-
-	if (err)
-		mlx5_core_warn(dev, "%s fail %d\n",
-			       req->npages < 0 ? "reclaim" : "give", err);
-
-	kfree(req);
-}
-
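-/* FW pages-request event handler. Runs in atomic context (hence
- * GFP_ATOMIC), so the actual work is deferred to the page allocator
- * workqueue; positive npages means give, negative means reclaim.
- */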
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages)
-{
-	struct mlx5_pages_req *req;
-
-	req = kzalloc(sizeof(*req), GFP_ATOMIC);
-	if (!req) {
-		mlx5_core_warn(dev, "failed to allocate pages request\n");
-		return;
-	}
-
-	req->dev = dev;
-	req->func_id = func_id;
-	req->npages = npages;
-	INIT_WORK(&req->work, pages_work_handler);
-	queue_work(dev->priv.pg_wq, &req->work);
-}
-
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
-{
-	u16 uninitialized_var(func_id);
-	s32 uninitialized_var(npages);
-	int err;
-
-	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
-	if (err)
-		return err;
-
-	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
-		      npages, boot ? "boot" : "init", func_id);
-
-	return give_pages(dev, func_id, npages, 0);
-}
-
-enum {
-	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
-};
-
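-/* Maximum page addresses one reclaim command output can carry: the inline
- * output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks, minus the
- * fixed manage_pages_out header, divided by one page-address entry.
- */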
-static int optimal_reclaimed_pages(void)
-{
-	struct mlx5_cmd_prot_block *block;
-	struct mlx5_cmd_layout *lay;
-	int ret;
-
-	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
-	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
-	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
-
-	return ret;
-}
-
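-/* Pull back every page FW holds, in optimal_reclaimed_pages()-sized
- * batches. The timeout rearms whenever a batch makes progress, so only a
- * FW that stops returning pages triggers the give-up path.
- */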
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
-{
-	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-	struct fw_page *fwp;
-	struct rb_node *p;
-	int nclaimed = 0;
-	int err = 0;
-
-	do {
-		p = rb_first(&dev->priv.page_root);
-		if (p) {
-			fwp = rb_entry(p, struct fw_page, rb_node);
-			err = reclaim_pages(dev, fwp->func_id,
-					    optimal_reclaimed_pages(),
-					    &nclaimed);
-
-			if (err) {
-				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
-					       err);
-				return err;
-			}
-			if (nclaimed)
-				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
-		}
-		if (time_after(jiffies, end)) {
-			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
-			break;
-		}
-	} while (p);
-
-	WARN(dev->priv.fw_pages,
-	     "FW pages counter is %d after reclaiming all pages\n",
-	     dev->priv.fw_pages);
-	WARN(dev->priv.vfs_pages,
-	     "VFs FW pages counter is %d after reclaiming all pages\n",
-	     dev->priv.vfs_pages);
-
-	return 0;
-}
-
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
-{
-	dev->priv.page_root = RB_ROOT;
-	INIT_LIST_HEAD(&dev->priv.free_list);
-}
-
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
-{
-	/* nothing */
-}
-
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
-{
-	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
-	if (!dev->priv.pg_wq)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
-{
-	destroy_workqueue(dev->priv.pg_wq);
-}
-
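-/* Poll until FW returns all pages given out on behalf of VFs. As long as
- * the counter keeps dropping the timeout is rearmed; a stalled counter
- * eventually yields -ETIMEDOUT.
- */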
-int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
-{
-	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-	int prev_vfs_pages = dev->priv.vfs_pages;
-
-	/* In case of internal error we will free the pages manually later */
-	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
-		return 0;
-	}
-
-	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
-		      dev->priv.name);
-	while (dev->priv.vfs_pages) {
-		if (time_after(jiffies, end)) {
-			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
-			return -ETIMEDOUT;
-		}
-		if (dev->priv.vfs_pages < prev_vfs_pages) {
-			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
-			prev_vfs_pages = dev->priv.vfs_pages;
-		}
-		msleep(50);
-	}
-
-	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
-	return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pd.c b/drivers/net/ethernet/mellanox/mlx5/core/pd.c
deleted file mode 100644
index bd830d8..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/pd.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
-{
-	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {0};
-	int err;
-
-	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*pdn = MLX5_GET(alloc_pd_out, out, pd);
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_alloc_pd);
-
-int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
-{
-	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)]   = {0};
-
-	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
-	MLX5_SET(dealloc_pd_in, in, pd, pdn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
deleted file mode 100644
index d2ec9d2..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ /dev/null
@@ -1,868 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/port.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
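-/* Generic ACCESS_REG wrapper for the port/QoS registers: op_mod 0 is a
- * write and 1 is a read (hence the !write below); register_data carries
- * the register payload in both directions.
- */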
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
-			 int size_in, void *data_out, int size_out,
-			 u16 reg_id, int arg, int write)
-{
-	int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
-	int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
-	int err = -ENOMEM;
-	u32 *out = NULL;
-	u32 *in = NULL;
-	void *data;
-
-	in = mlx5_vzalloc(inlen);
-	out = mlx5_vzalloc(outlen);
-	if (!in || !out)
-		goto out;
-
-	data = MLX5_ADDR_OF(access_register_in, in, register_data);
-	memcpy(data, data_in, size_in);
-
-	MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
-	MLX5_SET(access_register_in, in, op_mod, !write);
-	MLX5_SET(access_register_in, in, argument, arg);
-	MLX5_SET(access_register_in, in, register_id, reg_id);
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
-	if (err)
-		goto out;
-
-	data = MLX5_ADDR_OF(access_register_out, out, register_data);
-	memcpy(data_out, data, size_out);
-
-out:
-	kvfree(out);
-	kvfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
-
-struct mlx5_reg_pcap {
-	u8			rsvd0;
-	u8			port_num;
-	u8			rsvd1[2];
-	__be32			caps_127_96;
-	__be32			caps_95_64;
-	__be32			caps_63_32;
-	__be32			caps_31_0;
-};
-
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
-{
-	struct mlx5_reg_pcap in;
-	struct mlx5_reg_pcap out;
-
-	memset(&in, 0, sizeof(in));
-	in.caps_127_96 = cpu_to_be32(caps);
-	in.port_num = port_num;
-
-	return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
-				    sizeof(out), MLX5_REG_PCAP, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
-
-int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
-			 int ptys_size, int proto_mask, u8 local_port)
-{
-	u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
-
-	MLX5_SET(ptys_reg, in, local_port, local_port);
-	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
-	return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
-				    ptys_size, MLX5_REG_PTYS, 0, 0);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
-
-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
-{
-	u32 in[MLX5_ST_SZ_DW(mlcr_reg)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
-
-	MLX5_SET(mlcr_reg, in, local_port, 1);
-	MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				    sizeof(out), MLX5_REG_MLCR, 0, 1);
-}
-
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
-			      u32 *proto_cap, int proto_mask)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
-
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
-	if (err)
-		return err;
-
-	if (proto_mask == MLX5_PTYS_EN)
-		*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
-	else
-		*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
-
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
-				u32 *proto_admin, int proto_mask)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
-
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
-	if (err)
-		return err;
-
-	if (proto_mask == MLX5_PTYS_EN)
-		*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
-	else
-		*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
-
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
-				    u8 *link_width_oper, u8 local_port)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
-
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
-	if (err)
-		return err;
-
-	*link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
-
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
-				   u32 *proto_oper, u8 local_port)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
-
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
-				   local_port);
-	if (err)
-		return err;
-
-	*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
-
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
-				  u8 *proto_oper, u8 local_port)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	int err;
-
-	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
-				   local_port);
-	if (err)
-		return err;
-
-	*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
-
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
-		       u32 proto_admin, int proto_mask)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
-	u8 an_disable_admin;
-	u8 an_disable_cap;
-	u8 an_status;
-
-	mlx5_query_port_autoneg(dev, proto_mask, &an_status,
-				&an_disable_cap, &an_disable_admin);
-	if (!an_disable_cap && an_disable)
-		return -EPERM;
-
-	memset(in, 0, sizeof(in));
-
-	MLX5_SET(ptys_reg, in, local_port, 1);
-	MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
-	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
-	if (proto_mask == MLX5_PTYS_EN)
-		MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
-	else
-		MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
-
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				    sizeof(out), MLX5_REG_PTYS, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
-
-/* This function should be used after setting a port register only */
-void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
-{
-	enum mlx5_port_status ps;
-
-	mlx5_query_port_admin_status(dev, &ps);
-	mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
-	if (ps == MLX5_PORT_UP)
-		mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
-}
-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
-
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
-			       enum mlx5_port_status status)
-{
-	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(paos_reg)];
-
-	MLX5_SET(paos_reg, in, local_port, 1);
-	MLX5_SET(paos_reg, in, admin_status, status);
-	MLX5_SET(paos_reg, in, ase, 1);
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				    sizeof(out), MLX5_REG_PAOS, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
-
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
-				 enum mlx5_port_status *status)
-{
-	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(paos_reg)];
-	int err;
-
-	MLX5_SET(paos_reg, in, local_port, 1);
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PAOS, 0, 0);
-	if (err)
-		return err;
-	*status = MLX5_GET(paos_reg, out, admin_status);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
-
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
-				u16 *max_mtu, u16 *oper_mtu, u8 port)
-{
-	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-
-	MLX5_SET(pmtu_reg, in, local_port, port);
-	mlx5_core_access_reg(dev, in, sizeof(in), out,
-			     sizeof(out), MLX5_REG_PMTU, 0, 0);
-
-	if (max_mtu)
-		*max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
-	if (oper_mtu)
-		*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
-	if (admin_mtu)
-		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
-}
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
-{
-	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
-
-	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
-	MLX5_SET(pmtu_reg, in, local_port, port);
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PMTU, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
-			     u8 port)
-{
-	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
-			      u8 port)
-{
-	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
-
-static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
-{
-	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
-	int module_mapping;
-	int err;
-
-	MLX5_SET(pmlp_reg, in, local_port, 1);
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
-				   MLX5_REG_PMLP, 0, 0);
-	if (err)
-		return err;
-
-	module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
-	*module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
-
-	return 0;
-}
-
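-/* Read the cable module EEPROM via the MCIA register. A single command is
- * clamped to MLX5_EEPROM_MAX_BYTES and never crosses the 256-byte low-page
- * boundary; offsets beyond the low page are read through the high I2C
- * address. Returns bytes read on success or a negative errno.
- */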
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
-			     u16 offset, u16 size, u8 *data)
-{
-	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
-	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
-	int module_num;
-	u16 i2c_addr;
-	int status;
-	int err;
-	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
-
-	err = mlx5_query_module_num(dev, &module_num);
-	if (err)
-		return err;
-
-	memset(in, 0, sizeof(in));
-	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
-	if (offset < MLX5_EEPROM_PAGE_LENGTH &&
-	    offset + size > MLX5_EEPROM_PAGE_LENGTH)
-		/* Cross pages read, read until offset 256 in low page */
-		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
-
-	i2c_addr = MLX5_I2C_ADDR_LOW;
-	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
-		i2c_addr = MLX5_I2C_ADDR_HIGH;
-		offset -= MLX5_EEPROM_PAGE_LENGTH;
-	}
-
-	MLX5_SET(mcia_reg, in, l, 0);
-	MLX5_SET(mcia_reg, in, module, module_num);
-	MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
-	MLX5_SET(mcia_reg, in, page_number, 0);
-	MLX5_SET(mcia_reg, in, device_address, offset);
-	MLX5_SET(mcia_reg, in, size, size);
-
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_MCIA, 0, 0);
-	if (err)
-		return err;
-
-	status = MLX5_GET(mcia_reg, out, status);
-	if (status) {
-		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
-			      status);
-		return -EIO;
-	}
-
-	memcpy(data, ptr, size);
-
-	return size;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
-
-static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
-				int pvlc_size,  u8 local_port)
-{
-	u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
-
-	MLX5_SET(pvlc_reg, in, local_port, local_port);
-	return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
-				    pvlc_size, MLX5_REG_PVLC, 0, 0);
-}
-
-int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
-			      u8 *vl_hw_cap, u8 local_port)
-{
-	u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
-	int err;
-
-	err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
-	if (err)
-		return err;
-
-	*vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
-
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
-			     u8 port_num, void *out, size_t sz)
-{
-	u32 *in;
-	int err;
-
-	in  = mlx5_vzalloc(sz);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(ppcnt_reg, in, local_port, port_num);
-
-	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
-	err = mlx5_core_access_reg(dev, in, sz, out,
-				   sz, MLX5_REG_PPCNT, 0, 0);
-
-	kvfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
-
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
-{
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
-
-	MLX5_SET(pfcc_reg, in, local_port, 1);
-	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
-	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
-
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				    sizeof(out), MLX5_REG_PFCC, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
-
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
-			  u32 *rx_pause, u32 *tx_pause)
-{
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
-	int err;
-
-	MLX5_SET(pfcc_reg, in, local_port, 1);
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PFCC, 0, 0);
-	if (err)
-		return err;
-
-	if (rx_pause)
-		*rx_pause = MLX5_GET(pfcc_reg, out, pprx);
-
-	if (tx_pause)
-		*tx_pause = MLX5_GET(pfcc_reg, out, pptx);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
-
-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
-{
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
-
-	MLX5_SET(pfcc_reg, in, local_port, 1);
-	MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
-	MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
-	MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
-	MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
-
-	return mlx5_core_access_reg(dev, in, sizeof(in), out,
-				    sizeof(out), MLX5_REG_PFCC, 0, 1);
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
-
-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
-{
-	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
-	int err;
-
-	MLX5_SET(pfcc_reg, in, local_port, 1);
-	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_PFCC, 0, 0);
-	if (err)
-		return err;
-
-	if (pfc_en_tx)
-		*pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
-
-	if (pfc_en_rx)
-		*pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
-
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
-			     u8 *an_status,
-			     u8 *an_disable_cap, u8 *an_disable_admin)
-{
-	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-
-	*an_status = 0;
-	*an_disable_cap = 0;
-	*an_disable_admin = 0;
-
-	if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
-		return;
-
-	*an_status = MLX5_GET(ptys_reg, out, an_status);
-	*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
-	*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
-
-int mlx5_max_tc(struct mlx5_core_dev *mdev)
-{
-	u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
-
-	return num_tc - 1;
-}
-
-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(dcbx_param)] = {0};
-
-	MLX5_SET(dcbx_param, in, port_number, 1);
-
-	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
-				    sizeof(in), MLX5_REG_DCBX_PARAM, 0, 0);
-}
-
-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in)
-{
-	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
-
-	MLX5_SET(dcbx_param, in, port_number, 1);
-
-	return mlx5_core_access_reg(mdev, in, sizeof(out), out,
-				    sizeof(out), MLX5_REG_DCBX_PARAM, 0, 1);
-}
-
-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
-{
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
-	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
-	int err;
-	int i;
-
-	for (i = 0; i < 8; i++) {
-		if (prio_tc[i] > mlx5_max_tc(mdev))
-			return -EINVAL;
-
-		MLX5_SET(qtct_reg, in, prio, i);
-		MLX5_SET(qtct_reg, in, tclass, prio_tc[i]);
-
-		err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
-					   sizeof(out), MLX5_REG_QTCT, 0, 1);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
-
-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
-			    u8 prio, u8 *tc)
-{
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
-	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
-	int err;
-
-	memset(in, 0, sizeof(in));
-	memset(out, 0, sizeof(out));
-
-	MLX5_SET(qtct_reg, in, port_number, 1);
-	MLX5_SET(qtct_reg, in, prio, prio);
-
-	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
-				   sizeof(out), MLX5_REG_QTCT, 0, 0);
-	if (!err)
-		*tc = MLX5_GET(qtct_reg, out, tclass);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
-
-static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
-				   int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
-
-	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
-
-	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
-				    MLX5_REG_QETCR, 0, 1);
-}
-
-static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
-				     int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
-
-	if (!MLX5_CAP_GEN(mdev, ets))
-		return -ENOTSUPP;
-
-	memset(in, 0, sizeof(in));
-	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
-				    MLX5_REG_QETCR, 0, 0);
-}
-
-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
-{
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
-	int i;
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
-		MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
-	}
-
-	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
-
-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
-{
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
-	int i;
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
-		MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
-	}
-
-	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
-
-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
-				u8 tc, u8 *bw_pct)
-{
-	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
-	void *ets_tcn_conf;
-	int err;
-
-	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
-	if (err)
-		return err;
-
-	ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
-				    tc_configuration[tc]);
-
-	*bw_pct = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
-			   bw_allocation);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
-
-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
-				    u8 *max_bw_value,
-				    u8 *max_bw_units)
-{
-	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
-	void *ets_tcn_conf;
-	int i;
-
-	MLX5_SET(qetc_reg, in, port_number, 1);
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]);
-
-		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1);
-		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units,
-			 max_bw_units[i]);
-		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value,
-			 max_bw_value[i]);
-	}
-
-	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
-
-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
-				   u8 *max_bw_value,
-				   u8 *max_bw_units)
-{
-	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
-	void *ets_tcn_conf;
-	int err;
-	int i;
-
-	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
-	if (err)
-		return err;
-
-	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
-		ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]);
-
-		max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
-					   max_bw_value);
-		max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
-					   max_bw_units);
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
-
-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
-{
-	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
-
-	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
-	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
-	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
-
-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
-{
-	u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
-	int err;
-
-	MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
-	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
-
-static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
-				  int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
-
-	MLX5_SET(pcmr_reg, in, local_port, 1);
-	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
-				    outlen, MLX5_REG_PCMR, 0, 0);
-}
-
-static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
-
-	return mlx5_core_access_reg(mdev, in, inlen, out,
-				    sizeof(out), MLX5_REG_PCMR, 0, 1);
-}
-
-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
-{
-	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
-
-	MLX5_SET(pcmr_reg, in, local_port, 1);
-	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
-	return mlx5_set_ports_check(mdev, in, sizeof(in));
-}
-
-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
-			 bool *enabled)
-{
-	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
-	/* Default values for FW which do not support MLX5_REG_PCMR */
-	*supported = false;
-	*enabled = true;
-
-	if (!MLX5_CAP_GEN(mdev, ports_check))
-		return;
-
-	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
-		return;
-
-	*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
-	*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
-}
-
-static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
-	"Cable plugged",   /* MLX5_MODULE_STATUS_PLUGGED    = 0x1 */
-	"Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED  = 0x2 */
-	"Cable error",     /* MLX5_MODULE_STATUS_ERROR      = 0x3 */
-};
-
-static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
-	"Power budget exceeded",
-	"Long Range for non MLNX cable",
-	"Bus stuck(I2C or data shorted)",
-	"No EEPROM/retry timeout",
-	"Enforce part number list",
-	"Unknown identifier",
-	"High Temperature",
-	"Bad or shorted cable/module",
-	"Unknown status",
-};
-
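-/* Decode a port module (cable) event EQE: bump the matching status/error
- * counter and emit a rate-limited, human-readable log line.
- */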
-void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
-{
-	enum port_module_event_status_type module_status;
-	enum port_module_event_error_type error_type;
-	struct mlx5_eqe_port_module *module_event_eqe;
-	struct mlx5_priv *priv = &dev->priv;
-	u8 module_num;
-
-	module_event_eqe = &eqe->data.port_module;
-	module_num = module_event_eqe->module;
-	module_status = module_event_eqe->module_status &
-			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
-	error_type = module_event_eqe->error_type &
-		     PORT_MODULE_EVENT_ERROR_TYPE_MASK;
-
-	if (module_status < MLX5_MODULE_STATUS_ERROR) {
-		priv->pme_stats.status_counters[module_status - 1]++;
-	} else if (module_status == MLX5_MODULE_STATUS_ERROR) {
-		if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
-			/* Unknown error type */
-			error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
-		priv->pme_stats.error_counters[error_type]++;
-	}
-
-	if (!printk_ratelimit())
-		return;
-
-	if (module_status < MLX5_MODULE_STATUS_ERROR)
-		mlx5_core_info(dev,
-			       "Port module event: module %u, %s\n",
-			       module_num, mlx5_pme_status[module_status - 1]);
-
-	else if (module_status == MLX5_MODULE_STATUS_ERROR)
-		mlx5_core_info(dev,
-			       "Port module event[error]: module %u, %s, %s\n",
-			       module_num, mlx5_pme_status[module_status - 1],
-			       mlx5_pme_error[error_type]);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
deleted file mode 100644
index cbbcef2..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ /dev/null
@@ -1,541 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-
-#include <linux/gfp.h>
-#include <linux/export.h>
-#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/qp.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
-
-#include "mlx5_core.h"
-
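-/* Look up a QP/RQ/SQ by resource serial number and take a reference. The
- * caller must drop it with mlx5_core_put_rsc(); the final put completes
- * ->free, which destroy_qprqsq_common() waits on.
- */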
-static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
-						 u32 rsn)
-{
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
-	struct mlx5_core_rsc_common *common;
-
-	spin_lock(&table->lock);
-
-	common = radix_tree_lookup(&table->tree, rsn);
-	if (common)
-		atomic_inc(&common->refcount);
-
-	spin_unlock(&table->lock);
-
-	if (!common) {
-		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
-			       rsn);
-		return NULL;
-	}
-	return common;
-}
-
-void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
-{
-	if (atomic_dec_and_test(&common->refcount))
-		complete(&common->free);
-}
-
-static u64 qp_allowed_event_types(void)
-{
-	u64 mask;
-
-	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
-	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
-	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
-	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
-	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
-	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
-	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
-	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
-
-	return mask;
-}
-
-static u64 rq_allowed_event_types(void)
-{
-	u64 mask;
-
-	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
-	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
-
-	return mask;
-}
-
-static u64 sq_allowed_event_types(void)
-{
-	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
-}
-
-static bool is_event_type_allowed(int rsc_type, int event_type)
-{
-	switch (rsc_type) {
-	case MLX5_EVENT_QUEUE_TYPE_QP:
-		return BIT(event_type) & qp_allowed_event_types();
-	case MLX5_EVENT_QUEUE_TYPE_RQ:
-		return BIT(event_type) & rq_allowed_event_types();
-	case MLX5_EVENT_QUEUE_TYPE_SQ:
-		return BIT(event_type) & sq_allowed_event_types();
-	default:
-		WARN(1, "Event arrived for unknown resource type");
-		return false;
-	}
-}
-
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
-{
-	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
-	struct mlx5_core_qp *qp;
-
-	if (!common)
-		return;
-
-	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
-		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
-			       event_type, rsn);
-		return;
-	}
-
-	switch (common->res) {
-	case MLX5_RES_QP:
-	case MLX5_RES_RQ:
-	case MLX5_RES_SQ:
-		qp = (struct mlx5_core_qp *)common;
-		qp->event(qp, event_type);
-		break;
-
-	default:
-		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
-	}
-
-	mlx5_core_put_rsc(common);
-}
-
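-/* Register the resource in the QP table radix tree. The key encodes the
- * resource type in the bits above MLX5_USER_INDEX_LEN, so QPs, RQs and SQs
- * share one tree without their numbers colliding.
- */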
-static int create_qprqsq_common(struct mlx5_core_dev *dev,
-				struct mlx5_core_qp *qp,
-				int rsc_type)
-{
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
-	int err;
-
-	qp->common.res = rsc_type;
-	spin_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree,
-				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
-				qp);
-	spin_unlock_irq(&table->lock);
-	if (err)
-		return err;
-
-	atomic_set(&qp->common.refcount, 1);
-	init_completion(&qp->common.free);
-	qp->pid = current->pid;
-
-	return 0;
-}
-
-static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *qp)
-{
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
-	unsigned long flags;
-
-	spin_lock_irqsave(&table->lock, flags);
-	radix_tree_delete(&table->tree,
-			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
-	spin_unlock_irqrestore(&table->lock, flags);
-	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
-	wait_for_completion(&qp->common.free);
-}
-
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
-			struct mlx5_core_qp *qp,
-			u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
-	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
-	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
-	int err;
-
-	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
-
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (err)
-		return err;
-
-	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
-	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
-
-	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
-	if (err)
-		goto err_cmd;
-
-	err = mlx5_debug_qp_add(dev, qp);
-	if (err)
-		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
-			      qp->qpn);
-
-	atomic_inc(&dev->num_qps);
-
-	return 0;
-
-err_cmd:
-	memset(din, 0, sizeof(din));
-	memset(dout, 0, sizeof(dout));
-	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
-	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
-	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
-			 struct mlx5_core_qp *qp)
-{
-	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
-	int err;
-
-	mlx5_debug_qp_remove(dev, qp);
-
-	destroy_qprqsq_common(dev, qp);
-
-	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	atomic_dec(&dev->num_qps);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-
-struct mbox_info {
-	u32 *in;
-	u32 *out;
-	int inlen;
-	int outlen;
-};
-
-static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
-{
-	mbox->inlen  = inlen;
-	mbox->outlen = outlen;
-	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
-	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
-	if (!mbox->in || !mbox->out) {
-		kfree(mbox->in);
-		kfree(mbox->out);
-		return -ENOMEM;
-	}
-
-	return 0;
-}
-
-static void mbox_free(struct mbox_info *mbox)
-{
-	kfree(mbox->in);
-	kfree(mbox->out);
-}
-
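-/* Each QP state transition uses its own command layout. Allocate in/out
- * mailboxes sized for the given transition and fill the common fields:
- * opcode and qpn always, plus opt_param_mask and the QPC for the
- * QPC-carrying transitions.
- */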
-static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
-				u32 opt_param_mask, void *qpc,
-				struct mbox_info *mbox)
-{
-	mbox->out = NULL;
-	mbox->in = NULL;
-
-#define MBOX_ALLOC(mbox, typ)  \
-	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
-
-#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
-	MLX5_SET(typ##_in, in, opcode, _opcode); \
-	MLX5_SET(typ##_in, in, qpn, _qpn)
-
-#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
-	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
-	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
-	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
-
-	switch (opcode) {
-	/* 2RST & 2ERR */
-	case MLX5_CMD_OP_2RST_QP:
-		if (MBOX_ALLOC(mbox, qp_2rst))
-			return -ENOMEM;
-		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
-		break;
-	case MLX5_CMD_OP_2ERR_QP:
-		if (MBOX_ALLOC(mbox, qp_2err))
-			return -ENOMEM;
-		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
-		break;
-
-	/* MODIFY with QPC */
-	case MLX5_CMD_OP_RST2INIT_QP:
-		if (MBOX_ALLOC(mbox, rst2init_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	case MLX5_CMD_OP_INIT2RTR_QP:
-		if (MBOX_ALLOC(mbox, init2rtr_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	case MLX5_CMD_OP_RTR2RTS_QP:
-		if (MBOX_ALLOC(mbox, rtr2rts_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	case MLX5_CMD_OP_RTS2RTS_QP:
-		if (MBOX_ALLOC(mbox, rts2rts_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	case MLX5_CMD_OP_SQERR2RTS_QP:
-		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	case MLX5_CMD_OP_INIT2INIT_QP:
-		if (MBOX_ALLOC(mbox, init2init_qp))
-			return -ENOMEM;
-		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
-				  opt_param_mask, qpc);
-		break;
-	default:
-		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
-			      opcode, qpn);
-		return -EINVAL;
-	}
-	return 0;
-}
-
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
-			u32 opt_param_mask, void *qpc,
-			struct mlx5_core_qp *qp)
-{
-	struct mbox_info mbox;
-	int err;
-
-	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
-				   opt_param_mask, qpc, &mbox);
-	if (err)
-		return err;
-
-	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
-	mbox_free(&mbox);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
-
-	memset(table, 0, sizeof(*table));
-	spin_lock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-	mlx5_qp_debugfs_init(dev);
-}
-
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
-{
-	mlx5_qp_debugfs_cleanup(dev);
-}
-
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-		       u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
-
-	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
-	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
-{
-	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
-	int err;
-
-	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
-{
-	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};
-
-	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
-	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *rq)
-{
-	int err;
-	u32 rqn;
-
-	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
-	if (err)
-		return err;
-
-	rq->qpn = rqn;
-	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
-	if (err)
-		goto err_destroy_rq;
-
-	return 0;
-
-err_destroy_rq:
-	mlx5_core_destroy_rq(dev, rq->qpn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *rq)
-{
-	destroy_qprqsq_common(dev, rq);
-	mlx5_core_destroy_rq(dev, rq->qpn);
-}
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *sq)
-{
-	int err;
-	u32 sqn;
-
-	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
-	if (err)
-		return err;
-
-	sq->qpn = sqn;
-	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
-	if (err)
-		goto err_destroy_sq;
-
-	return 0;
-
-err_destroy_sq:
-	mlx5_core_destroy_sq(dev, sq->qpn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *sq)
-{
-	destroy_qprqsq_common(dev, sq);
-	mlx5_core_destroy_sq(dev, sq->qpn);
-}
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
-{
-	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
-	int err;
-
-	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*counter_id = MLX5_GET(alloc_q_counter_out, out,
-				       counter_set_id);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
-
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
-{
-	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
-
-	MLX5_SET(dealloc_q_counter_in, in, opcode,
-		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
-	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
-
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
-			      int reset, void *out, int out_size)
-{
-	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
-
-	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
-	MLX5_SET(query_q_counter_in, in, clear, reset);
-	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
-
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
-				  u32 *out_of_buffer)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
-	void *out;
-	int err;
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
-	if (!err)
-		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
-					  out_of_buffer);
-
-	kvfree(out);
-	return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
deleted file mode 100644
index e651e4c..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-/* Scheduling element fw management */
-int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-				       void *ctx, u32 *element_id)
-{
-	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {0};
-	void *schedc;
-	int err;
-
-	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
-			      scheduling_context);
-	MLX5_SET(create_scheduling_element_in, in, opcode,
-		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
-	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
-		 hierarchy);
-	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-
-	*element_id = MLX5_GET(create_scheduling_element_out, out,
-			       scheduling_element_id);
-	return 0;
-}
-
-int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-				       void *ctx, u32 element_id,
-				       u32 modify_bitmask)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_out)] = {0};
-	void *schedc;
-
-	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
-			      scheduling_context);
-	MLX5_SET(modify_scheduling_element_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
-	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
-		 element_id);
-	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
-		 modify_bitmask);
-	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
-		 hierarchy);
-	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
-					u32 element_id)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_out)] = {0};
-
-	MLX5_SET(destroy_scheduling_element_in, in, opcode,
-		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
-	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
-		 element_id);
-	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
-		 hierarchy);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-/* Finds an entry where we can register the given rate
- * If the rate already exists, return the entry where it is registered,
- * otherwise return the first available entry.
- * If the table is full, return NULL
- */
-static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
-					   u32 rate)
-{
-	struct mlx5_rl_entry *ret_entry = NULL;
-	bool empty_found = false;
-	int i;
-
-	for (i = 0; i < table->max_size; i++) {
-		if (table->rl_entry[i].rate == rate)
-			return &table->rl_entry[i];
-		if (!empty_found && !table->rl_entry[i].rate) {
-			empty_found = true;
-			ret_entry = &table->rl_entry[i];
-		}
-	}
-
-	return ret_entry;
-}
-
-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
-				   u32 rate, u16 index)
-{
-	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
-
-	MLX5_SET(set_rate_limit_in, in, opcode,
-		 MLX5_CMD_OP_SET_RATE_LIMIT);
-	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
-	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
-{
-	struct mlx5_rl_table *table = &dev->priv.rl_table;
-
-	return (rate <= table->max_rate && rate >= table->min_rate);
-}
-EXPORT_SYMBOL(mlx5_rl_is_in_range);
-
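-/* Get a HW rate-limit table index for @rate. Entries are reference
- * counted: the limit is programmed in HW only for the first user and
- * cleared again when the last user calls mlx5_rl_remove_rate().
- */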
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
-{
-	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	struct mlx5_rl_entry *entry;
-	int err = 0;
-
-	mutex_lock(&table->rl_lock);
-
-	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
-		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
-			      rate, table->min_rate, table->max_rate);
-		err = -EINVAL;
-		goto out;
-	}
-
-	entry = find_rl_entry(table, rate);
-	if (!entry) {
-		mlx5_core_err(dev, "Max number of %u rates reached\n",
-			      table->max_size);
-		err = -ENOSPC;
-		goto out;
-	}
-	if (entry->refcount) {
-		/* rate already configured */
-		entry->refcount++;
-	} else {
-		/* new rate limit */
-		err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
-		if (err) {
-			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
-				      rate, err);
-			goto out;
-		}
-		entry->rate = rate;
-		entry->refcount = 1;
-	}
-	*index = entry->index;
-
-out:
-	mutex_unlock(&table->rl_lock);
-	return err;
-}
-EXPORT_SYMBOL(mlx5_rl_add_rate);
-
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
-{
-	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	struct mlx5_rl_entry *entry = NULL;
-
-	/* 0 is a reserved value for unlimited rate */
-	if (rate == 0)
-		return;
-
-	mutex_lock(&table->rl_lock);
-	entry = find_rl_entry(table, rate);
-	if (!entry || !entry->refcount) {
-		mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
-		goto out;
-	}
-
-	entry->refcount--;
-	if (!entry->refcount) {
-		/* need to remove rate */
-		mlx5_set_rate_limit_cmd(dev, 0, entry->index);
-		entry->rate = 0;
-	}
-
-out:
-	mutex_unlock(&table->rl_lock);
-}
-EXPORT_SYMBOL(mlx5_rl_remove_rate);
-
-int mlx5_init_rl_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	int i;
-
-	mutex_init(&table->rl_lock);
-	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
-		table->max_size = 0;
-		return 0;
-	}
-
-	/* First entry is reserved for unlimited rate */
-	table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
-	table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
-	table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
-
-	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
-				  GFP_KERNEL);
-	if (!table->rl_entry)
-		return -ENOMEM;
-
-	/* The index represents the index in HW rate limit table
-	 * Index 0 is reserved for unlimited rate
-	 */
-	for (i = 0; i < table->max_size; i++)
-		table->rl_entry[i].index = i + 1;
-
-	/* Index 0 is reserved */
-	mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
-		       table->max_size,
-		       table->min_rate >> 10,
-		       table->max_rate >> 10);
-
-	return 0;
-}
-
-void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_rl_table *table = &dev->priv.rl_table;
-	int i;
-
-	/* Clear all configured rates */
-	for (i = 0; i < table->max_size; i++)
-		if (table->rl_entry[i].rate)
-			mlx5_set_rate_limit_cmd(dev, 0,
-						table->rl_entry[i].index);
-
-	kfree(dev->priv.rl_table.rl_entry);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
deleted file mode 100644
index e086277..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#ifdef CONFIG_MLX5_CORE_EN
-#include "eswitch.h"
-#endif
-
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	return !!sriov->num_vfs;
-}
-
-static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err;
-	int vf;
-
-	if (sriov->enabled_vfs) {
-		mlx5_core_warn(dev,
-			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
-			       sriov->enabled_vfs);
-		return -EBUSY;
-	}
-
-#ifdef CONFIG_MLX5_CORE_EN
-	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
-	if (err) {
-		mlx5_core_warn(dev,
-			       "failed to enable eswitch SRIOV (%d)\n", err);
-		return err;
-	}
-#endif
-
-	for (vf = 0; vf < num_vfs; vf++) {
-		err = mlx5_core_enable_hca(dev, vf + 1);
-		if (err) {
-			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
-			continue;
-		}
-		sriov->vfs_ctx[vf].enabled = 1;
-		sriov->enabled_vfs++;
-		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
-
-	}
-
-	return 0;
-}
-
-static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err;
-	int vf;
-
-	if (!sriov->enabled_vfs)
-		return;
-
-	for (vf = 0; vf < sriov->num_vfs; vf++) {
-		if (!sriov->vfs_ctx[vf].enabled)
-			continue;
-		err = mlx5_core_disable_hca(dev, vf + 1);
-		if (err) {
-			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
-			continue;
-		}
-		sriov->vfs_ctx[vf].enabled = 0;
-		sriov->enabled_vfs--;
-	}
-
-#ifdef CONFIG_MLX5_CORE_EN
-	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
-#endif
-
-	if (mlx5_wait_for_vf_pages(dev))
-		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
-}
-
-static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	int err = 0;
-
-	if (pci_num_vf(pdev)) {
-		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
-		return -EBUSY;
-	}
-
-	err = pci_enable_sriov(pdev, num_vfs);
-	if (err)
-		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-
-	return err;
-}
-
-static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
-{
-	pci_disable_sriov(pdev);
-}
-
-static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	int err = 0;
-
-	err = mlx5_device_enable_sriov(dev, num_vfs);
-	if (err) {
-		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
-		return err;
-	}
-
-	err = mlx5_pci_enable_sriov(pdev, num_vfs);
-	if (err) {
-		mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
-		mlx5_device_disable_sriov(dev);
-		return err;
-	}
-
-	sriov->num_vfs = num_vfs;
-
-	return 0;
-}
-
-static void mlx5_sriov_disable(struct pci_dev *pdev)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	mlx5_pci_disable_sriov(pdev);
-	mlx5_device_disable_sriov(dev);
-	sriov->num_vfs = 0;
-}
-
-int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
-{
-	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
-	int err = 0;
-
-	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
-	if (!mlx5_core_is_pf(dev))
-		return -EPERM;
-
-	if (num_vfs && mlx5_lag_is_active(dev)) {
-		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
-		return -EINVAL;
-	}
-
-	if (num_vfs)
-		err = mlx5_sriov_enable(pdev, num_vfs);
-	else
-		mlx5_sriov_disable(pdev);
-
-	return err ? err : num_vfs;
-}
-
-int mlx5_sriov_attach(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
-		return 0;
-
-	/* If SR-IOV VFs exist at the PCI level, enable them at the device level */
-	return mlx5_device_enable_sriov(dev, sriov->num_vfs);
-}
-
-void mlx5_sriov_detach(struct mlx5_core_dev *dev)
-{
-	if (!mlx5_core_is_pf(dev))
-		return;
-
-	mlx5_device_disable_sriov(dev);
-}
-
-int mlx5_sriov_init(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-	struct pci_dev *pdev = dev->pdev;
-	int total_vfs;
-
-	if (!mlx5_core_is_pf(dev))
-		return 0;
-
-	total_vfs = pci_sriov_get_totalvfs(pdev);
-	sriov->num_vfs = pci_num_vf(pdev);
-	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
-	if (!sriov->vfs_ctx)
-		return -ENOMEM;
-
-	return 0;
-}
-
-void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
-{
-	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
-	if (!mlx5_core_is_pf(dev))
-		return;
-
-	kfree(sriov->vfs_ctx);
-}
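
For context (not part of this patch): mlx5_core_sriov_configure() follows
the standard PCI .sriov_configure contract (return the VF count on success,
a negative errno on failure), so VFs are enabled from userspace by writing
to the device's sriov_numvfs sysfs attribute. A sketch of the wiring,
assuming main.c registers the callback as in mainline:

	static struct pci_driver mlx5_core_driver = {
		.name		 = "mlx5_core",
		/* .id_table, .probe, .remove elided */
		.sriov_configure = mlx5_core_sriov_configure,
	};
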
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
deleted file mode 100644
index 30996300..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include <linux/mlx5/srq.h>
-#include <rdma/ib_verbs.h>
-#include "mlx5_core.h"
-#include <linux/mlx5/transobj.h>
-
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
-{
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-	struct mlx5_core_srq *srq;
-
-	spin_lock(&table->lock);
-
-	srq = radix_tree_lookup(&table->tree, srqn);
-	if (srq)
-		atomic_inc(&srq->refcount);
-
-	spin_unlock(&table->lock);
-
-	if (!srq) {
-		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
-		return;
-	}
-
-	srq->event(srq, event_type);
-
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
-}
-
-static int get_pas_size(struct mlx5_srq_attr *in)
-{
-	u32 log_page_size = in->log_page_size + 12;
-	u32 log_srq_size  = in->log_size;
-	u32 log_rq_stride = in->wqe_shift;
-	u32 page_offset   = in->page_offset;
-	u32 po_quanta	  = 1 << (log_page_size - 6);
-	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
-	u32 page_size	  = 1 << log_page_size;
-	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
-	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
-
-	return rq_num_pas * sizeof(u64);
-}
-
-static void set_wq(void *wq, struct mlx5_srq_attr *in)
-{
-	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
-		 & MLX5_SRQ_FLAG_WQ_SIG));
-	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
-	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
-	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
-	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
-	MLX5_SET(wq,   wq, lwm,		  in->lwm);
-	MLX5_SET(wq,   wq, pd,		  in->pd);
-	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
-}
-
-static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
-{
-	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
-		 & MLX5_SRQ_FLAG_WQ_SIG));
-	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
-	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
-	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
-	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
-	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
-	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
-	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
-	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
-	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
-}
-
-static void get_wq(void *wq, struct mlx5_srq_attr *in)
-{
-	if (MLX5_GET(wq, wq, wq_signature))
-		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
-	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
-	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
-	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
-	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
-	in->lwm		  = MLX5_GET(wq,   wq, lwm);
-	in->pd		  = MLX5_GET(wq,   wq, pd);
-	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
-}
-
-static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
-{
-	if (MLX5_GET(srqc, srqc, wq_signature))
-		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
-	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
-	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
-	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
-	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
-	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
-	in->pd		  = MLX5_GET(srqc,   srqc, pd);
-	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
-}
-
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
-{
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-	struct mlx5_core_srq *srq;
-
-	spin_lock(&table->lock);
-
-	srq = radix_tree_lookup(&table->tree, srqn);
-	if (srq)
-		atomic_inc(&srq->refcount);
-
-	spin_unlock(&table->lock);
-
-	return srq;
-}
-EXPORT_SYMBOL(mlx5_core_get_srq);
-
-static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			  struct mlx5_srq_attr *in)
-{
-	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
-	void *create_in;
-	void *srqc;
-	void *pas;
-	int pas_size;
-	int inlen;
-	int err;
-
-	pas_size  = get_pas_size(in);
-	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
-	create_in = mlx5_vzalloc(inlen);
-	if (!create_in)
-		return -ENOMEM;
-
-	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
-	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
-
-	set_srqc(srqc, in);
-	memcpy(pas, in->pas, pas_size);
-
-	MLX5_SET(create_srq_in, create_in, opcode,
-		 MLX5_CMD_OP_CREATE_SRQ);
-
-	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
-			    sizeof(create_out));
-	kvfree(create_in);
-	if (!err)
-		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
-
-	return err;
-}
-
-static int destroy_srq_cmd(struct mlx5_core_dev *dev,
-			   struct mlx5_core_srq *srq)
-{
-	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
-	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
-
-	MLX5_SET(destroy_srq_in, srq_in, opcode,
-		 MLX5_CMD_OP_DESTROY_SRQ);
-	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
-
-	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
-			     srq_out, sizeof(srq_out));
-}
-
-static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-		       u16 lwm, int is_srq)
-{
-	/* arm_srq structs are missing; use the identical xrc ones */
-	u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
-	u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
-	MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
-	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
-	MLX5_SET(arm_xrc_srq_in, srq_in, lwm,      lwm);
-
-	return  mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
-			      srq_out, sizeof(srq_out));
-}
-
-static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *out)
-{
-	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
-	u32 *srq_out;
-	void *srqc;
-	int err;
-
-	srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
-	if (!srq_out)
-		return -ENOMEM;
-
-	MLX5_SET(query_srq_in, srq_in, opcode,
-		 MLX5_CMD_OP_QUERY_SRQ);
-	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
-	err =  mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
-			     srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
-	if (err)
-		goto out;
-
-	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
-	get_srqc(srqc, out);
-	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
-		out->flags |= MLX5_SRQ_FLAG_ERR;
-out:
-	kvfree(srq_out);
-	return err;
-}
-
-static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
-			      struct mlx5_core_srq *srq,
-			      struct mlx5_srq_attr *in)
-{
-	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
-	void *create_in;
-	void *xrc_srqc;
-	void *pas;
-	int pas_size;
-	int inlen;
-	int err;
-
-	pas_size  = get_pas_size(in);
-	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
-	create_in = mlx5_vzalloc(inlen);
-	if (!create_in)
-		return -ENOMEM;
-
-	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
-				xrc_srq_context_entry);
-	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
-
-	set_srqc(xrc_srqc, in);
-	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
-	memcpy(pas, in->pas, pas_size);
-	MLX5_SET(create_xrc_srq_in, create_in, opcode,
-		 MLX5_CMD_OP_CREATE_XRC_SRQ);
-
-	memset(create_out, 0, sizeof(create_out));
-	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
-			    sizeof(create_out));
-	if (err)
-		goto out;
-
-	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
-out:
-	kvfree(create_in);
-	return err;
-}
-
-static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
-			       struct mlx5_core_srq *srq)
-{
-	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
-	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
-
-	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
-		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
-	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
-
-	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
-			     xrcsrq_out, sizeof(xrcsrq_out));
-}
-
-static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
-			   struct mlx5_core_srq *srq, u16 lwm)
-{
-	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
-	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
-	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
-	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,   MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
-	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
-	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);
-
-	return  mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
-			      xrcsrq_out, sizeof(xrcsrq_out));
-}
-
-static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
-			     struct mlx5_core_srq *srq,
-			     struct mlx5_srq_attr *out)
-{
-	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
-	u32 *xrcsrq_out;
-	void *xrc_srqc;
-	int err;
-
-	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
-	if (!xrcsrq_out)
-		return -ENOMEM;
-	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
-
-	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
-		 MLX5_CMD_OP_QUERY_XRC_SRQ);
-	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
-
-	err =  mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
-			     MLX5_ST_SZ_BYTES(query_xrc_srq_out));
-	if (err)
-		goto out;
-
-	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
-				xrc_srq_context_entry);
-	get_srqc(xrc_srqc, out);
-	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
-		out->flags |= MLX5_SRQ_FLAG_ERR;
-
-out:
-	kvfree(xrcsrq_out);
-	return err;
-}
-
-static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			  struct mlx5_srq_attr *in)
-{
-	void *create_in;
-	void *rmpc;
-	void *wq;
-	int pas_size;
-	int inlen;
-	int err;
-
-	pas_size = get_pas_size(in);
-	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
-	create_in = mlx5_vzalloc(inlen);
-	if (!create_in)
-		return -ENOMEM;
-
-	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
-	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
-	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
-	set_wq(wq, in);
-	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
-
-	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
-
-	kvfree(create_in);
-	return err;
-}
-
-static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
-			   struct mlx5_core_srq *srq)
-{
-	return mlx5_core_destroy_rmp(dev, srq->srqn);
-}
-
-static int arm_rmp_cmd(struct mlx5_core_dev *dev,
-		       struct mlx5_core_srq *srq,
-		       u16 lwm)
-{
-	void *in;
-	void *rmpc;
-	void *wq;
-	void *bitmask;
-	int err;
-
-	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
-	if (!in)
-		return -ENOMEM;
-
-	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
-	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
-	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);
-
-	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
-	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
-	MLX5_SET(wq,		wq,	 lwm,	    lwm);
-	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
-	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
-
-	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
-
-	kvfree(in);
-	return err;
-}
-
-static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *out)
-{
-	u32 *rmp_out;
-	void *rmpc;
-	int err;
-
-	rmp_out =  mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
-	if (!rmp_out)
-		return -ENOMEM;
-
-	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
-	if (err)
-		goto out;
-
-	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
-	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
-	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
-		out->flags |= MLX5_SRQ_FLAG_ERR;
-
-out:
-	kvfree(rmp_out);
-	return err;
-}
-
-static int create_srq_split(struct mlx5_core_dev *dev,
-			    struct mlx5_core_srq *srq,
-			    struct mlx5_srq_attr *in)
-{
-	if (!dev->issi)
-		return create_srq_cmd(dev, srq, in);
-	else if (srq->common.res == MLX5_RES_XSRQ)
-		return create_xrc_srq_cmd(dev, srq, in);
-	else
-		return create_rmp_cmd(dev, srq, in);
-}
-
-static int destroy_srq_split(struct mlx5_core_dev *dev,
-			     struct mlx5_core_srq *srq)
-{
-	if (!dev->issi)
-		return destroy_srq_cmd(dev, srq);
-	else if (srq->common.res == MLX5_RES_XSRQ)
-		return destroy_xrc_srq_cmd(dev, srq);
-	else
-		return destroy_rmp_cmd(dev, srq);
-}
-
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *in)
-{
-	int err;
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-
-	if (in->type == IB_SRQT_XRC)
-		srq->common.res = MLX5_RES_XSRQ;
-	else
-		srq->common.res = MLX5_RES_SRQ;
-
-	err = create_srq_split(dev, srq, in);
-	if (err)
-		return err;
-
-	atomic_set(&srq->refcount, 1);
-	init_completion(&srq->free);
-
-	spin_lock_irq(&table->lock);
-	err = radix_tree_insert(&table->tree, srq->srqn, srq);
-	spin_unlock_irq(&table->lock);
-	if (err) {
-		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
-		goto err_destroy_srq_split;
-	}
-
-	return 0;
-
-err_destroy_srq_split:
-	destroy_srq_split(dev, srq);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_srq);
-
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
-{
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-	struct mlx5_core_srq *tmp;
-	int err;
-
-	spin_lock_irq(&table->lock);
-	tmp = radix_tree_delete(&table->tree, srq->srqn);
-	spin_unlock_irq(&table->lock);
-	if (!tmp) {
-		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
-		return -EINVAL;
-	}
-	if (tmp != srq) {
-		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
-		return -EINVAL;
-	}
-
-	err = destroy_srq_split(dev, srq);
-	if (err)
-		return err;
-
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
-	wait_for_completion(&srq->free);
-
-	return 0;
-}
-EXPORT_SYMBOL(mlx5_core_destroy_srq);
-
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			struct mlx5_srq_attr *out)
-{
-	if (!dev->issi)
-		return query_srq_cmd(dev, srq, out);
-	else if (srq->common.res == MLX5_RES_XSRQ)
-		return query_xrc_srq_cmd(dev, srq, out);
-	else
-		return query_rmp_cmd(dev, srq, out);
-}
-EXPORT_SYMBOL(mlx5_core_query_srq);
-
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-		      u16 lwm, int is_srq)
-{
-	if (!dev->issi)
-		return arm_srq_cmd(dev, srq, lwm, is_srq);
-	else if (srq->common.res == MLX5_RES_XSRQ)
-		return arm_xrc_srq_cmd(dev, srq, lwm);
-	else
-		return arm_rmp_cmd(dev, srq, lwm);
-}
-EXPORT_SYMBOL(mlx5_core_arm_srq);
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev)
-{
-	struct mlx5_srq_table *table = &dev->priv.srq_table;
-
-	memset(table, 0, sizeof(*table));
-	spin_lock_init(&table->lock);
-	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-}
-
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
-{
-	/* nothing */
-}
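
For context (not part of this patch): mlx5_core_get_srq() takes a reference
on the SRQ it returns, and mlx5_core_destroy_srq() waits on srq->free until
the last reference is dropped. Every lookup must therefore be balanced with
a put, exactly as mlx5_srq_event() does above:

	struct mlx5_core_srq *srq;

	srq = mlx5_core_get_srq(dev, srqn);
	if (!srq)
		return;		/* already destroyed or never existed */

	/* ... use srq ... */

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
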
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
deleted file mode 100644
index a00ff49..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ /dev/null
@@ -1,400 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include <linux/mlx5/transobj.h>
-
-int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
-{
-	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
-	int err;
-
-	MLX5_SET(alloc_transport_domain_in, in, opcode,
-		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*tdn = MLX5_GET(alloc_transport_domain_out, out,
-				transport_domain);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
-
-void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
-{
-	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
-
-	MLX5_SET(dealloc_transport_domain_in, in, opcode,
-		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
-	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
-
-int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
-	int err;
-
-	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*rqn = MLX5_GET(create_rq_out, out, rqn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_rq);
-
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
-
-	MLX5_SET(modify_rq_in, in, rqn, rqn);
-	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
-
-	memset(out, 0, sizeof(out));
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_modify_rq);
-
-void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
-
-	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
-	MLX5_SET(destroy_rq_in, in, rqn, rqn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_rq);
-
-int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
-	int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
-
-	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
-	MLX5_SET(query_rq_in, in, rqn, rqn);
-
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_query_rq);
-
-int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
-	int err;
-
-	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*sqn = MLX5_GET(create_sq_out, out, sqn);
-
-	return err;
-}
-
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
-
-	MLX5_SET(modify_sq_in, in, sqn, sqn);
-	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_modify_sq);
-
-void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
-
-	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
-	MLX5_SET(destroy_sq_in, in, sqn, sqn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
-	int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
-
-	MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
-	MLX5_SET(query_sq_in, in, sqn, sqn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-EXPORT_SYMBOL(mlx5_core_query_sq);
-
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *tirn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
-	int err;
-
-	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
-
-	memset(out, 0, sizeof(out));
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*tirn = MLX5_GET(create_tir_out, out, tirn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_tir);
-
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
-			 int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
-
-	MLX5_SET(modify_tir_in, in, tirn, tirn);
-	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
-
-	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
-	MLX5_SET(destroy_tir_in, in, tirn, tirn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_tir);
-
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *tisn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
-	int err;
-
-	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*tisn = MLX5_GET(create_tis_out, out, tisn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_tis);
-
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
-			 int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0};
-
-	MLX5_SET(modify_tis_in, in, tisn, tisn);
-	MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
-
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_modify_tis);
-
-void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
-
-	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
-	MLX5_SET(destroy_tis_in, in, tisn, tisn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_tis);
-
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *rmpn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
-	int err;
-
-	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
-
-	return err;
-}
-
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
-
-	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
-
-	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
-	MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
-	int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
-
-	MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
-	MLX5_SET(query_rmp_in, in, rmpn,   rmpn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
-{
-	void *in;
-	void *rmpc;
-	void *wq;
-	void *bitmask;
-	int  err;
-
-	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
-	if (!in)
-		return -ENOMEM;
-
-	rmpc    = MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
-	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
-	wq      = MLX5_ADDR_OF(rmpc,	        rmpc, wq);
-
-	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
-	MLX5_SET(modify_rmp_in, in,	 rmpn,      rmpn);
-	MLX5_SET(wq,		wq,	 lwm,	    lwm);
-	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
-	MLX5_SET(rmpc,		rmpc,	 state,	    MLX5_RMPC_STATE_RDY);
-
-	err =  mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
-
-	kvfree(in);
-
-	return err;
-}
-
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			  u32 *xsrqn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
-	int err;
-
-	MLX5_SET(create_xrc_srq_in, in, opcode,     MLX5_CMD_OP_CREATE_XRC_SRQ);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
-
-	return err;
-}
-
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
-
-	MLX5_SET(destroy_xrc_srq_in, in, opcode,   MLX5_CMD_OP_DESTROY_XRC_SRQ);
-	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
-{
-	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
-	void *srqc;
-	void *xrc_srqc;
-	int err;
-
-	MLX5_SET(query_xrc_srq_in, in, opcode,   MLX5_CMD_OP_QUERY_XRC_SRQ);
-	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out,
-			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
-	if (!err) {
-		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
-					xrc_srq_context_entry);
-		srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
-		memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
-	}
-
-	return err;
-}
-
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
-{
-	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
-
-	MLX5_SET(arm_xrc_srq_in, in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
-	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
-	MLX5_SET(arm_xrc_srq_in, in, lwm,      lwm);
-	MLX5_SET(arm_xrc_srq_in, in, op_mod,
-		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *rqtn)
-{
-	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
-	int err;
-
-	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-	if (!err)
-		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_core_create_rqt);
-
-int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
-			 int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
-
-	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
-	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
-	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-}
-
-void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
-{
-	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
-
-	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
-	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_core_destroy_rqt);
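
For context (not part of this patch): every helper in this file follows the
same mailbox pattern: the caller builds the "in" layout with MLX5_SET() and
MLX5_ADDR_OF(), the helper stamps the opcode, executes the command, and
extracts the new object number from "out". A sketch for a TIS, assuming the
tisc field names from the IFC layout:

	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
	u32 tisn;
	int err;

	MLX5_SET(tisc, tisc, transport_domain, tdn);
	err = mlx5_core_create_tis(dev, in, sizeof(in), &tisn);
	if (err)
		return err;

	/* ... attach SQs to tisn ... */

	mlx5_core_destroy_tis(dev, tisn);
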
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
deleted file mode 100644
index 2e6b0f2..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io-mapping.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/cmd.h>
-#include "mlx5_core.h"
-
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
-{
-	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]   = {0};
-	int err;
-
-	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (!err)
-		*uarn = MLX5_GET(alloc_uar_out, out, uar);
-	return err;
-}
-EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
-
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
-{
-	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]   = {0};
-
-	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
-	MLX5_SET(dealloc_uar_in, in, uar, uarn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL(mlx5_cmd_free_uar);
-
-static int uars_per_sys_page(struct mlx5_core_dev *mdev)
-{
-	if (MLX5_CAP_GEN(mdev, uar_4k))
-		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
-
-	return 1;
-}
-
-static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
-{
-	u32 system_page_index;
-
-	if (MLX5_CAP_GEN(mdev, uar_4k))
-		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
-	else
-		system_page_index = index;
-
-	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
-}
-
-static void up_rel_func(struct kref *kref)
-{
-	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
-
-	list_del(&up->list);
-	if (mlx5_cmd_free_uar(up->mdev, up->index))
-		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
-	kfree(up->reg_bitmap);
-	kfree(up->fp_bitmap);
-	kfree(up);
-}
-
-static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
-					      bool map_wc)
-{
-	struct mlx5_uars_page *up;
-	int err = -ENOMEM;
-	phys_addr_t pfn;
-	int bfregs;
-	int i;
-
-	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
-	up = kzalloc(sizeof(*up), GFP_KERNEL);
-	if (!up)
-		return ERR_PTR(err);
-
-	up->mdev = mdev;
-	up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
-	if (!up->reg_bitmap)
-		goto error1;
-
-	up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
-	if (!up->fp_bitmap)
-		goto error1;
-
-	for (i = 0; i < bfregs; i++)
-		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
-			set_bit(i, up->reg_bitmap);
-		else
-			set_bit(i, up->fp_bitmap);
-
-	up->bfregs = bfregs;
-	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
-	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
-
-	err = mlx5_cmd_alloc_uar(mdev, &up->index);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
-		goto error1;
-	}
-
-	pfn = uar2pfn(mdev, up->index);
-	if (map_wc) {
-		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
-		if (!up->map) {
-			err = -EAGAIN;
-			goto error2;
-		}
-	} else {
-		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
-		if (!up->map) {
-			err = -ENOMEM;
-			goto error2;
-		}
-	}
-	kref_init(&up->ref_count);
-	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
-		      up->index, up->bfregs);
-	return up;
-
-error2:
-	if (mlx5_cmd_free_uar(mdev, up->index))
-		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
-error1:
-	kfree(up->fp_bitmap);
-	kfree(up->reg_bitmap);
-	kfree(up);
-	return ERR_PTR(err);
-}
-
-struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
-{
-	struct mlx5_uars_page *ret;
-
-	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
-	if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
-		ret = alloc_uars_page(mdev, false);
-		if (IS_ERR(ret)) {
-			ret = NULL;
-			goto out;
-		}
-		list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
-	} else {
-		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
-				       struct mlx5_uars_page, list);
-		kref_get(&ret->ref_count);
-	}
-out:
-	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
-
-	return ret;
-}
-EXPORT_SYMBOL(mlx5_get_uars_page);
-
-void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
-{
-	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
-	kref_put(&up->ref_count, up_rel_func);
-	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
-}
-EXPORT_SYMBOL(mlx5_put_uars_page);
-
-static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
-{
-	/* return the offset in bytes from the start of the page to the
-	 * blue flame area of the UAR
-	 */
-	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
-	       (dbi % MLX5_BFREGS_PER_UAR) *
-	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
-}
-
-static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
-		       bool map_wc, bool fast_path)
-{
-	struct mlx5_bfreg_data *bfregs;
-	struct mlx5_uars_page *up;
-	struct list_head *head;
-	unsigned long *bitmap;
-	unsigned int *avail;
-	struct mutex *lock;  /* pointer to right mutex */
-	int dbi;
-
-	bfregs = &mdev->priv.bfregs;
-	if (map_wc) {
-		head = &bfregs->wc_head.list;
-		lock = &bfregs->wc_head.lock;
-	} else {
-		head = &bfregs->reg_head.list;
-		lock = &bfregs->reg_head.lock;
-	}
-	mutex_lock(lock);
-	if (list_empty(head)) {
-		up = alloc_uars_page(mdev, map_wc);
-		if (IS_ERR(up)) {
-			mutex_unlock(lock);
-			return PTR_ERR(up);
-		}
-		list_add(&up->list, head);
-	} else {
-		up = list_entry(head->next, struct mlx5_uars_page, list);
-		kref_get(&up->ref_count);
-	}
-	if (fast_path) {
-		bitmap = up->fp_bitmap;
-		avail = &up->fp_avail;
-	} else {
-		bitmap = up->reg_bitmap;
-		avail = &up->reg_avail;
-	}
-	dbi = find_first_bit(bitmap, up->bfregs);
-	clear_bit(dbi, bitmap);
-	(*avail)--;
-	if (!(*avail))
-		list_del(&up->list);
-
-	bfreg->map = up->map + map_offset(mdev, dbi);
-	bfreg->up = up;
-	bfreg->wc = map_wc;
-	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
-	mutex_unlock(lock);
-
-	return 0;
-}
-
-int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
-		     bool map_wc, bool fast_path)
-{
-	int err;
-
-	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
-	if (!err)
-		return 0;
-
-	if (err == -EAGAIN && map_wc)
-		return alloc_bfreg(mdev, bfreg, false, fast_path);
-
-	return err;
-}
-EXPORT_SYMBOL(mlx5_alloc_bfreg);
-
-static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
-					   struct mlx5_uars_page *up,
-					   struct mlx5_sq_bfreg *bfreg)
-{
-	unsigned int uar_idx;
-	unsigned int bfreg_idx;
-	unsigned int bf_reg_size;
-
-	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
-
-	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
-	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
-
-	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
-}
-
-void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
-{
-	struct mlx5_bfreg_data *bfregs;
-	struct mlx5_uars_page *up;
-	struct mutex *lock; /* pointer to right mutex */
-	unsigned int dbi;
-	bool fp;
-	unsigned int *avail;
-	unsigned long *bitmap;
-	struct list_head *head;
-
-	bfregs = &mdev->priv.bfregs;
-	if (bfreg->wc) {
-		head = &bfregs->wc_head.list;
-		lock = &bfregs->wc_head.lock;
-	} else {
-		head = &bfregs->reg_head.list;
-		lock = &bfregs->reg_head.lock;
-	}
-	up = bfreg->up;
-	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
-	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
-	if (fp) {
-		avail = &up->fp_avail;
-		bitmap = up->fp_bitmap;
-	} else {
-		avail = &up->reg_avail;
-		bitmap = up->reg_bitmap;
-	}
-	mutex_lock(lock);
-	(*avail)++;
-	set_bit(dbi, bitmap);
-	if (*avail == 1)
-		list_add_tail(&up->list, head);
-
-	kref_put(&up->ref_count, up_rel_func);
-	mutex_unlock(lock);
-}
-EXPORT_SYMBOL(mlx5_free_bfreg);
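
For context (not part of this patch): mlx5_alloc_bfreg() already handles
the write-combining fallback (alloc_uars_page() returns -EAGAIN when the WC
ioremap fails, and the wrapper retries without WC), so a send-queue owner
only needs the simple pair below. bfreg.map points at the doorbell and
blue-flame area, and bfreg.index is the UAR index to program into the SQ
context:

	struct mlx5_sq_bfreg bfreg;
	int err;

	err = mlx5_alloc_bfreg(mdev, &bfreg, true /* map_wc */,
			       false /* fast_path */);
	if (err)
		return err;

	/* ... ring doorbells through bfreg.map ... */

	mlx5_free_bfreg(mdev, &bfreg);
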
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
deleted file mode 100644
index 269e440..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ /dev/null
@@ -1,1025 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/etherdevice.h>
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/vport.h>
-#include "mlx5_core.h"
-
-static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
-				   u16 vport, u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
-
-	MLX5_SET(query_vport_state_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_VPORT_STATE);
-	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
-	MLX5_SET(query_vport_state_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_vport_state_in, in, other_vport, 1);
-
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
-	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
-	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
-
-	return MLX5_GET(query_vport_state_out, out, state);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
-
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
-{
-	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
-
-	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
-
-	return MLX5_GET(query_vport_state_out, out, admin_state);
-}
-EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
-
-int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
-				  u16 vport, u8 state)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
-
-	MLX5_SET(modify_vport_state_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
-	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
-	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
-	MLX5_SET(modify_vport_state_in, in, admin_state, state);
-
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
-
-static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
-					u32 *out, int outlen)
-{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
-
-	MLX5_SET(query_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
-					 int inlen)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
-
-	MLX5_SET(modify_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-}
-
-int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				    u16 vport, u8 *min_inline)
-{
-	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
-	int err;
-
-	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
-	if (!err)
-		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
-				       nic_vport_context.min_wqe_inline_mode);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
-
-int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 min_inline)
-{
-	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	void *nic_vport_ctx;
-
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.min_inline, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
-
-	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
-				     in, nic_vport_context);
-	MLX5_SET(nic_vport_context, nic_vport_ctx,
-		 min_wqe_inline_mode, min_inline);
-
-	return mlx5_modify_nic_vport_context(mdev, in, inlen);
-}
-
-int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 *addr)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-	u8 *out_addr;
-	int err;
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
-				nic_vport_context.permanent_address);
-
-	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
-	if (!err)
-		ether_addr_copy(addr, &out_addr[2]);
-
-	kvfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
-
-int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-				      u16 vport, u8 *addr)
-{
-	void *in;
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	int err;
-	void *nic_vport_ctx;
-	u8 *perm_mac;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_warn(mdev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.permanent_address, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
-
-	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
-				     in, nic_vport_context);
-	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
-				permanent_address);
-
-	ether_addr_copy(&perm_mac[2], addr);
-
-	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
-
-int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
-{
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-	u32 *out;
-	int err;
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
-	if (!err)
-		*mtu = MLX5_GET(query_nic_vport_context_out, out,
-				nic_vport_context.mtu);
-
-	kvfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
-
-int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
-{
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	void *in;
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
-
-	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-
-	kvfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
-
-int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
-				  u32 vport,
-				  enum mlx5_list_type list_type,
-				  u8 addr_list[][ETH_ALEN],
-				  int *list_size)
-{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
-	void *nic_vport_ctx;
-	int max_list_size;
-	int req_list_size;
-	int out_sz;
-	void *out;
-	int err;
-	int i;
-
-	req_list_size = *list_size;
-
-	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
-		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
-		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
-
-	if (req_list_size > max_list_size) {
-		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
-			       req_list_size, max_list_size);
-		req_list_size = max_list_size;
-	}
-
-	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
-			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
-
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
-	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err)
-		goto out;
-
-	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
-				     nic_vport_context);
-	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
-				 allowed_list_size);
-
-	*list_size = req_list_size;
-	for (i = 0; i < req_list_size; i++) {
-		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
-					nic_vport_ctx,
-					current_uc_mac_address[i]) + 2;
-		ether_addr_copy(addr_list[i], mac_addr);
-	}
-out:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
-
-int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
-				   enum mlx5_list_type list_type,
-				   u8 addr_list[][ETH_ALEN],
-				   int list_size)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
-	void *nic_vport_ctx;
-	int max_list_size;
-	int in_sz;
-	void *in;
-	int err;
-	int i;
-
-	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
-		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
-		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
-
-	if (list_size > max_list_size)
-		return -ENOSPC;
-
-	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
-		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
-
-	memset(out, 0, sizeof(out));
-	in = kzalloc(in_sz, GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(modify_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.addresses_list, 1);
-
-	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
-				     nic_vport_context);
-
-	MLX5_SET(nic_vport_context, nic_vport_ctx,
-		 allowed_list_type, list_type);
-	MLX5_SET(nic_vport_context, nic_vport_ctx,
-		 allowed_list_size, list_size);
-
-	for (i = 0; i < list_size; i++) {
-		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
-					    nic_vport_ctx,
-					    current_uc_mac_address[i]) + 2;
-		ether_addr_copy(curr_mac, addr_list[i]);
-	}
-
-	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-	kfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
-
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
-			       u32 vport,
-			       u16 vlans[],
-			       int *size)
-{
-	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
-	void *nic_vport_ctx;
-	int req_list_size;
-	int max_list_size;
-	int out_sz;
-	void *out;
-	int err;
-	int i;
-
-	req_list_size = *size;
-	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
-	if (req_list_size > max_list_size) {
-		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
-			       req_list_size, max_list_size);
-		req_list_size = max_list_size;
-	}
-
-	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
-			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
-
-	memset(in, 0, sizeof(in));
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
-		 MLX5_NVPRT_LIST_TYPE_VLAN);
-	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-
-	if (vport)
-		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
-	if (err)
-		goto out;
-
-	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
-				     nic_vport_context);
-	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
-				 allowed_list_size);
-
-	*size = req_list_size;
-	for (i = 0; i < req_list_size; i++) {
-		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
-					       nic_vport_ctx,
-					       current_uc_mac_address[i]);
-		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
-	}
-out:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
-
-int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
-				u16 vlans[],
-				int list_size)
-{
-	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
-	void *nic_vport_ctx;
-	int max_list_size;
-	int in_sz;
-	void *in;
-	int err;
-	int i;
-
-	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
-
-	if (list_size > max_list_size)
-		return -ENOSPC;
-
-	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
-		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
-
-	memset(out, 0, sizeof(out));
-	in = kzalloc(in_sz, GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(modify_nic_vport_context_in, in, opcode,
-		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.addresses_list, 1);
-
-	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
-				     nic_vport_context);
-
-	MLX5_SET(nic_vport_context, nic_vport_ctx,
-		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
-	MLX5_SET(nic_vport_context, nic_vport_ctx,
-		 allowed_list_size, list_size);
-
-	for (i = 0; i < list_size; i++) {
-		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
-					       nic_vport_ctx,
-					       current_uc_mac_address[i]);
-		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
-	}
-
-	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-	kfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
-
-int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
-					   u64 *system_image_guid)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
-
-	*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
-					nic_vport_context.system_image_guid);
-
-	kfree(out);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
-
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
-
-	*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
-				nic_vport_context.node_guid);
-
-	kfree(out);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
-
-int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
-				    u32 vport, u64 node_guid)
-{
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	void *nic_vport_context;
-	void *in;
-	int err;
-
-	if (!vport)
-		return -EINVAL;
-	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
-		return -EACCES;
-	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -ENOTSUPP;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in)
-		return -ENOMEM;
-
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 field_select.node_guid, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
-	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
-
-	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
-					 in, nic_vport_context);
-	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
-
-	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
-					u16 *qkey_viol_cntr)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-
-	out = mlx5_vzalloc(outlen);
-	if (!out)
-		return -ENOMEM;
-
-	mlx5_query_nic_vport_context(mdev, 0, out, outlen);
-
-	*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
-				   nic_vport_context.qkey_violation_counter);
-
-	kfree(out);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
-
-int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
-			     u8 port_num, u16  vf_num, u16 gid_index,
-			     union ib_gid *gid)
-{
-	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
-	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
-	int is_group_manager;
-	void *out = NULL;
-	void *in = NULL;
-	union ib_gid *tmp;
-	int tbsz;
-	int nout;
-	int err;
-
-	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
-	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
-		      vf_num, gid_index, tbsz);
-
-	if (gid_index > tbsz && gid_index != 0xffff)
-		return -EINVAL;
-
-	if (gid_index == 0xffff)
-		nout = tbsz;
-	else
-		nout = 1;
-
-	out_sz += nout * sizeof(*gid);
-
-	in = kzalloc(in_sz, GFP_KERNEL);
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!in || !out) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
-	if (other_vport) {
-		if (is_group_manager) {
-			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
-			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
-		} else {
-			err = -EPERM;
-			goto out;
-		}
-	}
-	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
-
-	if (MLX5_CAP_GEN(dev, num_ports) == 2)
-		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
-
-	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
-	if (err)
-		goto out;
-
-	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
-	gid->global.subnet_prefix = tmp->global.subnet_prefix;
-	gid->global.interface_id = tmp->global.interface_id;
-
-out:
-	kfree(in);
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
-
-int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
-			      u8 port_num, u16 vf_num, u16 pkey_index,
-			      u16 *pkey)
-{
-	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
-	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
-	int is_group_manager;
-	void *out = NULL;
-	void *in = NULL;
-	void *pkarr;
-	int nout;
-	int tbsz;
-	int err;
-	int i;
-
-	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-
-	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
-	if (pkey_index > tbsz && pkey_index != 0xffff)
-		return -EINVAL;
-
-	if (pkey_index == 0xffff)
-		nout = tbsz;
-	else
-		nout = 1;
-
-	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
-
-	in = kzalloc(in_sz, GFP_KERNEL);
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!in || !out) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
-	if (other_vport) {
-		if (is_group_manager) {
-			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
-			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
-		} else {
-			err = -EPERM;
-			goto out;
-		}
-	}
-	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
-
-	if (MLX5_CAP_GEN(dev, num_ports) == 2)
-		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
-
-	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
-	if (err)
-		goto out;
-
-	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
-	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
-		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
-
-out:
-	kfree(in);
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
-
-int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
-				 u8 other_vport, u8 port_num,
-				 u16 vf_num,
-				 struct mlx5_hca_vport_context *rep)
-{
-	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
-	int in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
-	int is_group_manager;
-	void *out;
-	void *ctx;
-	int err;
-
-	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-
-	out = kzalloc(out_sz, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
-
-	if (other_vport) {
-		if (is_group_manager) {
-			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
-			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
-		} else {
-			err = -EPERM;
-			goto ex;
-		}
-	}
-
-	if (MLX5_CAP_GEN(dev, num_ports) == 2)
-		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
-
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out,  out_sz);
-	if (err)
-		goto ex;
-
-	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
-	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
-	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
-	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
-	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
-	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
-	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
-				      port_physical_state);
-	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
-	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
-					       port_physical_state);
-	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
-	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
-	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
-	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
-					  cap_mask1_field_select);
-	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
-	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
-					  cap_mask2_field_select);
-	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
-	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
-					   init_type_reply);
-	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
-	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
-					  subnet_timeout);
-	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
-	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
-	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
-						  qkey_violation_counter);
-	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
-						  pkey_violation_counter);
-	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
-	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
-					    system_image_guid);
-
-ex:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
-
-int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
-					   u64 *sys_image_guid)
-{
-	struct mlx5_hca_vport_context *rep;
-	int err;
-
-	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
-	if (!rep)
-		return -ENOMEM;
-
-	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
-	if (!err)
-		*sys_image_guid = rep->sys_image_guid;
-
-	kfree(rep);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
-
-int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
-				   u64 *node_guid)
-{
-	struct mlx5_hca_vport_context *rep;
-	int err;
-
-	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
-	if (!rep)
-		return -ENOMEM;
-
-	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
-	if (!err)
-		*node_guid = rep->node_guid;
-
-	kfree(rep);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
-
-int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
-				 u32 vport,
-				 int *promisc_uc,
-				 int *promisc_mc,
-				 int *promisc_all)
-{
-	u32 *out;
-	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
-	int err;
-
-	out = kzalloc(outlen, GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
-	if (err)
-		goto out;
-
-	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
-			       nic_vport_context.promisc_uc);
-	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
-			       nic_vport_context.promisc_mc);
-	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
-				nic_vport_context.promisc_all);
-
-out:
-	kfree(out);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
-
-int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
-				  int promisc_uc,
-				  int promisc_mc,
-				  int promisc_all)
-{
-	void *in;
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_err(mdev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-
-	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 nic_vport_context.promisc_uc, promisc_uc);
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 nic_vport_context.promisc_mc, promisc_mc);
-	MLX5_SET(modify_nic_vport_context_in, in,
-		 nic_vport_context.promisc_all, promisc_all);
-
-	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
-
-enum mlx5_vport_roce_state {
-	MLX5_VPORT_ROCE_DISABLED = 0,
-	MLX5_VPORT_ROCE_ENABLED  = 1,
-};
-
-static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
-					    enum mlx5_vport_roce_state state)
-{
-	void *in;
-	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
-	int err;
-
-	in = mlx5_vzalloc(inlen);
-	if (!in) {
-		mlx5_core_warn(mdev, "failed to allocate inbox\n");
-		return -ENOMEM;
-	}
-
-	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
-	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
-		 state);
-
-	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
-
-	kvfree(in);
-
-	return err;
-}
-
-int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
-{
-	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
-}
-EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
-
-int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
-{
-	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
-}
-EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
-
-int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
-				  int vf, u8 port_num, void *out,
-				  size_t out_sz)
-{
-	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
-	int	is_group_manager;
-	void   *in;
-	int	err;
-
-	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-	in = mlx5_vzalloc(in_sz);
-	if (!in) {
-		err = -ENOMEM;
-		return err;
-	}
-
-	MLX5_SET(query_vport_counter_in, in, opcode,
-		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
-	if (other_vport) {
-		if (is_group_manager) {
-			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
-			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
-		} else {
-			err = -EPERM;
-			goto free;
-		}
-	}
-	if (MLX5_CAP_GEN(dev, num_ports) == 2)
-		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
-
-	err = mlx5_cmd_exec(dev, in, in_sz, out,  out_sz);
-free:
-	kvfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
-
-int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
-				       u8 other_vport, u8 port_num,
-				       int vf,
-				       struct mlx5_hca_vport_context *req)
-{
-	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
-	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
-	int is_group_manager;
-	void *in;
-	int err;
-	void *ctx;
-
-	mlx5_core_dbg(dev, "vf %d\n", vf);
-	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
-	in = kzalloc(in_sz, GFP_KERNEL);
-	if (!in)
-		return -ENOMEM;
-
-	memset(out, 0, sizeof(out));
-	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
-	if (other_vport) {
-		if (is_group_manager) {
-			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
-			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
-		} else {
-			err = -EPERM;
-			goto ex;
-		}
-	}
-
-	if (MLX5_CAP_GEN(dev, num_ports) > 1)
-		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
-
-	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
-	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
-	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
-	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
-	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
-	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
-	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
-	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
-	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
-	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
-	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
-	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
-	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
-	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
-	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
-	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
-	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
-	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
-	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
-	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
-	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
-ex:
-	kfree(in);
-	return err;
-}
-EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
deleted file mode 100644
index 07a9ba6..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mlx5/driver.h>
-#include "mlx5_core.h"
-#include "vxlan.h"
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-
-	spin_lock_init(&vxlan_db->lock);
-	INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
-}
-
-static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
-	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
-
-	MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
-		 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
-	MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
-{
-	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]   = {0};
-	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
-
-	MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
-		 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
-	MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
-	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-	struct mlx5e_vxlan *vxlan;
-
-	spin_lock(&vxlan_db->lock);
-	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
-	spin_unlock(&vxlan_db->lock);
-
-	return vxlan;
-}
-
-static void mlx5e_vxlan_add_port(struct work_struct *work)
-{
-	struct mlx5e_vxlan_work *vxlan_work =
-		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-	u16 port = vxlan_work->port;
-	struct mlx5e_vxlan *vxlan;
-	int err;
-
-	if (mlx5e_vxlan_lookup_port(priv, port))
-		goto free_work;
-
-	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
-		goto free_work;
-
-	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-	if (!vxlan)
-		goto err_delete_port;
-
-	vxlan->udp_port = port;
-
-	spin_lock_irq(&vxlan_db->lock);
-	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
-	spin_unlock_irq(&vxlan_db->lock);
-	if (err)
-		goto err_free;
-
-	goto free_work;
-
-err_free:
-	kfree(vxlan);
-err_delete_port:
-	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-free_work:
-	kfree(vxlan_work);
-}
-
-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-	struct mlx5e_vxlan *vxlan;
-
-	spin_lock_irq(&vxlan_db->lock);
-	vxlan = radix_tree_delete(&vxlan_db->tree, port);
-	spin_unlock_irq(&vxlan_db->lock);
-
-	if (!vxlan)
-		return;
-
-	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
-
-	kfree(vxlan);
-}
-
-static void mlx5e_vxlan_del_port(struct work_struct *work)
-{
-	struct mlx5e_vxlan_work *vxlan_work =
-		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
-	u16 port = vxlan_work->port;
-
-	__mlx5e_vxlan_core_del_port(priv, port);
-
-	kfree(vxlan_work);
-}
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
-			    u16 port, int add)
-{
-	struct mlx5e_vxlan_work *vxlan_work;
-
-	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
-	if (!vxlan_work)
-		return;
-
-	if (add)
-		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
-	else
-		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
-
-	vxlan_work->priv = priv;
-	vxlan_work->port = port;
-	vxlan_work->sa_family = sa_family;
-	queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
-{
-	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
-	struct mlx5e_vxlan *vxlan;
-	unsigned int port = 0;
-
-	spin_lock_irq(&vxlan_db->lock);
-	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
-		port = vxlan->udp_port;
-		spin_unlock_irq(&vxlan_db->lock);
-		__mlx5e_vxlan_core_del_port(priv, (u16)port);
-		spin_lock_irq(&vxlan_db->lock);
-	}
-	spin_unlock_irq(&vxlan_db->lock);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
deleted file mode 100644
index 5def12c..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef __MLX5_VXLAN_H__
-#define __MLX5_VXLAN_H__
-
-#include <linux/mlx5/driver.h>
-#include "en.h"
-
-struct mlx5e_vxlan {
-	u16 udp_port;
-};
-
-struct mlx5e_vxlan_work {
-	struct work_struct	work;
-	struct mlx5e_priv	*priv;
-	sa_family_t		sa_family;
-	u16			port;
-};
-
-static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
-{
-	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
-		mlx5_core_is_pf(mdev));
-}
-
-void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-
-void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
-			    u16 port, int add);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
-
-#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
deleted file mode 100644
index 921673c..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ /dev/null
@@ -1,196 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mlx5/driver.h>
-#include "wq.h"
-#include "mlx5_core.h"
-
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
-{
-	return (u32)wq->sz_m1 + 1;
-}
-
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
-{
-	return wq->sz_m1 + 1;
-}
-
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
-{
-	return (u32)wq->sz_m1 + 1;
-}
-
-static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
-{
-	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
-}
-
-static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
-{
-	return mlx5_cqwq_get_size(wq) << wq->log_stride;
-}
-
-static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
-{
-	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
-}
-
-int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		       void *wqc, struct mlx5_wq_cyc *wq,
-		       struct mlx5_wq_ctrl *wq_ctrl)
-{
-	int err;
-
-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
-
-	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
-		goto err_db_free;
-	}
-
-	wq->buf = wq_ctrl->buf.direct.buf;
-	wq->db  = wq_ctrl->db.db;
-
-	wq_ctrl->mdev = mdev;
-
-	return 0;
-
-err_db_free:
-	mlx5_db_free(mdev, &wq_ctrl->db);
-
-	return err;
-}
-
-int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl)
-{
-	int err;
-
-	wq->log_stride	= 6 + MLX5_GET(cqc, cqc, cqe_sz);
-	wq->log_sz	= MLX5_GET(cqc, cqc, log_cq_size);
-	wq->sz_m1	= (1 << wq->log_sz) - 1;
-	wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
-	wq->frag_sz_m1	= (1 << wq->log_frag_strides) - 1;
-
-	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
-				       &wq_ctrl->frag_buf,
-				       param->buf_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
-			       err);
-		goto err_db_free;
-	}
-
-	wq->frag_buf = wq_ctrl->frag_buf;
-	wq->db  = wq_ctrl->db.db;
-
-	wq_ctrl->mdev = mdev;
-
-	return 0;
-
-err_db_free:
-	mlx5_db_free(mdev, &wq_ctrl->db);
-
-	return err;
-}
-
-int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		      void *wqc, struct mlx5_wq_ll *wq,
-		      struct mlx5_wq_ctrl *wq_ctrl)
-{
-	struct mlx5_wqe_srq_next_seg *next_seg;
-	int err;
-	int i;
-
-	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
-	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
-
-	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
-		return err;
-	}
-
-	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
-				  &wq_ctrl->buf, param->buf_numa_node);
-	if (err) {
-		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
-		goto err_db_free;
-	}
-
-	wq->buf = wq_ctrl->buf.direct.buf;
-	wq->db  = wq_ctrl->db.db;
-
-	for (i = 0; i < wq->sz_m1; i++) {
-		next_seg = mlx5_wq_ll_get_wqe(wq, i);
-		next_seg->next_wqe_index = cpu_to_be16(i + 1);
-	}
-	next_seg = mlx5_wq_ll_get_wqe(wq, i);
-	wq->tail_next = &next_seg->next_wqe_index;
-
-	wq_ctrl->mdev = mdev;
-
-	return 0;
-
-err_db_free:
-	mlx5_db_free(mdev, &wq_ctrl->db);
-
-	return err;
-}
-
-void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
-{
-	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
-	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
-}
-
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
-{
-	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
-	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
deleted file mode 100644
index d8afed8..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_WQ_H__
-#define __MLX5_WQ_H__
-
-#include <linux/mlx5/mlx5_ifc.h>
-
-struct mlx5_wq_param {
-	int		linear;
-	int		buf_numa_node;
-	int		db_numa_node;
-};
-
-struct mlx5_wq_ctrl {
-	struct mlx5_core_dev	*mdev;
-	struct mlx5_buf		buf;
-	struct mlx5_db		db;
-};
-
-struct mlx5_frag_wq_ctrl {
-	struct mlx5_core_dev	*mdev;
-	struct mlx5_frag_buf	frag_buf;
-	struct mlx5_db		db;
-};
-
-struct mlx5_wq_cyc {
-	void			*buf;
-	__be32			*db;
-	u16			sz_m1;
-	u8			log_stride;
-};
-
-struct mlx5_cqwq {
-	struct mlx5_frag_buf	frag_buf;
-	__be32			*db;
-	u32			sz_m1;
-	u32			frag_sz_m1;
-	u32			cc; /* consumer counter */
-	u8			log_sz;
-	u8			log_stride;
-	u8			log_frag_strides;
-};
-
-struct mlx5_wq_ll {
-	void			*buf;
-	__be32			*db;
-	__be16			*tail_next;
-	u16			sz_m1;
-	u16			head;
-	u16			wqe_ctr;
-	u16			cur_sz;
-	u8			log_stride;
-};
-
-int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		       void *wqc, struct mlx5_wq_cyc *wq,
-		       struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-
-int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		     void *cqc, struct mlx5_cqwq *wq,
-		     struct mlx5_frag_wq_ctrl *wq_ctrl);
-u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
-
-int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
-		      void *wqc, struct mlx5_wq_ll *wq,
-		      struct mlx5_wq_ctrl *wq_ctrl);
-u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
-
-void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
-void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
-
-static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
-{
-	return ctr & wq->sz_m1;
-}
-
-static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
-{
-	return wq->buf + (ix << wq->log_stride);
-}
-
-static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
-{
-	int equal   = (cc1 == cc2);
-	int smaller = 0x8000 & (cc1 - cc2);
-
-	return !equal && !smaller;
-}
-
-static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
-{
-	return wq->cc & wq->sz_m1;
-}
-
-static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
-{
-	unsigned int frag = (ix >> wq->log_frag_strides);
-
-	return wq->frag_buf.frags[frag].buf +
-		((wq->frag_sz_m1 & ix) << wq->log_stride);
-}
-
-static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
-{
-	return wq->cc >> wq->log_sz;
-}
-
-static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
-{
-	wq->cc++;
-}
-
-static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
-{
-	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
-}
-
-static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
-{
-	return wq->cur_sz == wq->sz_m1;
-}
-
-static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
-{
-	return !wq->cur_sz;
-}
-
-static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
-{
-	return wq->buf + (ix << wq->log_stride);
-}
-
-static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
-{
-	wq->head = head_next;
-	wq->wqe_ctr++;
-	wq->cur_sz++;
-}
-
-static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
-				  __be16 *next_tail_next)
-{
-	*wq->tail_next = ix;
-	wq->tail_next = next_tail_next;
-	wq->cur_sz--;
-}
-
-static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
-{
-	*wq->db = cpu_to_be32(wq->wqe_ctr);
-}
-
-#endif /* __MLX5_WQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/debugfs.c
new file mode 100644
index 0000000..0dfa686
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/debugfs.c
@@ -0,0 +1,614 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "lib/qp.h"
+#include "lib/cq.h"
+#include "lib/driver.h"
+#include "mlx5.h"
+
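+/*
+ * Each debugfs resource node (QP, EQ or CQ) exposes one read-only file
+ * per field below; the enums index into the matching name arrays.
+ */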
+enum {
+	QP_PID,
+	QP_STATE,
+	QP_XPORT,
+	QP_MTU,
+	QP_N_RECV,
+	QP_RECV_SZ,
+	QP_N_SEND,
+	QP_LOG_PG_SZ,
+	QP_RQPN,
+};
+
+static char *qp_fields[] = {
+	[QP_PID]	= "pid",
+	[QP_STATE]	= "state",
+	[QP_XPORT]	= "transport",
+	[QP_MTU]	= "mtu",
+	[QP_N_RECV]	= "num_recv",
+	[QP_RECV_SZ]	= "rcv_wqe_sz",
+	[QP_N_SEND]	= "num_send",
+	[QP_LOG_PG_SZ]	= "log2_page_sz",
+	[QP_RQPN]	= "remote_qpn",
+};
+
+enum {
+	EQ_NUM_EQES,
+	EQ_INTR,
+	EQ_LOG_PG_SZ,
+};
+
+static char *eq_fields[] = {
+	[EQ_NUM_EQES]	= "num_eqes",
+	[EQ_INTR]	= "intr",
+	[EQ_LOG_PG_SZ]	= "log_page_size",
+};
+
+enum {
+	CQ_PID,
+	CQ_NUM_CQES,
+	CQ_LOG_PG_SZ,
+};
+
+static char *cq_fields[] = {
+	[CQ_PID]	= "pid",
+	[CQ_NUM_CQES]	= "num_cqes",
+	[CQ_LOG_PG_SZ]	= "log_page_size",
+};
+
+struct dentry *mlx5_debugfs_root;
+EXPORT_SYMBOL(mlx5_debugfs_root);
+
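+/*
+ * Top-level "mlx5" directory at the debugfs root (typically mounted at
+ * /sys/kernel/debug); all per-device trees hang beneath it.
+ */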
+void mlx5_register_debugfs(void)
+{
+	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
+	if (IS_ERR_OR_NULL(mlx5_debugfs_root))
+		mlx5_debugfs_root = NULL;
+}
+
+void mlx5_unregister_debugfs(void)
+{
+	debugfs_remove(mlx5_debugfs_root);
+}
+
+int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	atomic_set(&dev->num_qps, 0);
+
+	dev->priv.qp_debugfs = debugfs_create_dir("QPs",  dev->priv.dbg_root);
+	if (!dev->priv.qp_debugfs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->priv.qp_debugfs);
+}
+
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	dev->priv.eq_debugfs = debugfs_create_dir("EQs",  dev->priv.dbg_root);
+	if (!dev->priv.eq_debugfs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->priv.eq_debugfs);
+}
+
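+/*
+ * "average" file: reading reports the running mean (sum / n) of the
+ * per-command statistic; any write resets both counters to zero.
+ */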
+static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
+			    loff_t *pos)
+{
+	struct mlx5_cmd_stats *stats;
+	u64 field = 0;
+	int ret;
+	char tbuf[22];
+
+	if (*pos)
+		return 0;
+
+	stats = filp->private_data;
+	spin_lock_irq(&stats->lock);
+	if (stats->n)
+		field = div64_u64(stats->sum, stats->n);
+	spin_unlock_irq(&stats->lock);
+	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
+	if (ret > 0) {
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
+	}
+
+	*pos += ret;
+	return ret;
+}
+
+static ssize_t average_write(struct file *filp, const char __user *buf,
+			     size_t count, loff_t *pos)
+{
+	struct mlx5_cmd_stats *stats;
+
+	stats = filp->private_data;
+	spin_lock_irq(&stats->lock);
+	stats->sum = 0;
+	stats->n = 0;
+	spin_unlock_irq(&stats->lock);
+
+	*pos += count;
+
+	return count;
+}
+
+static const struct file_operations stats_fops = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.read	= average_read,
+	.write	= average_write,
+};
+
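+/*
+ * Build <dbg_root>/commands/<opcode name>/{average,n} for every command
+ * opcode the driver can name; unknown opcodes are skipped.
+ */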
+int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd_stats *stats;
+	struct dentry **cmd;
+	const char *namep;
+	int err;
+	int i;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	cmd = &dev->priv.cmdif_debugfs;
+	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);
+	if (!*cmd)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
+		stats = &dev->cmd.stats[i];
+		namep = mlx5_command_str(i);
+		if (strcmp(namep, "unknown command opcode")) {
+			stats->root = debugfs_create_dir(namep, *cmd);
+			if (!stats->root) {
+				mlx5_core_warn(dev, "failed adding command %d\n",
+					       i);
+				err = -ENOMEM;
+				goto out;
+			}
+
+			stats->avg = debugfs_create_file("average", 0400,
+							 stats->root, stats,
+							 &stats_fops);
+			if (!stats->avg) {
+				mlx5_core_warn(dev, "failed creating debugfs file\n");
+				err = -ENOMEM;
+				goto out;
+			}
+
+			stats->count = debugfs_create_u64("n", 0400,
+							  stats->root,
+							  &stats->n);
+			if (!stats->count) {
+				mlx5_core_warn(dev, "failed creating debugfs file\n");
+				err = -ENOMEM;
+				goto out;
+			}
+		}
+	}
+
+	return 0;
+out:
+	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+	return err;
+}
+
+void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
+}
+
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	dev->priv.cq_debugfs = debugfs_create_dir("CQs",  dev->priv.dbg_root);
+	if (!dev->priv.cq_debugfs)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->priv.cq_debugfs);
+}
+
+static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+			 int index, int *is_str)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
+	struct mlx5_qp_context *ctx;
+	u64 param = 0;
+	u32 *out;
+	int err;
+	int no_sq;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return param;
+
+	err = mlx5_core_qp_query(dev, qp, out, outlen);
+	if (err) {
+		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
+		goto out;
+	}
+
+	*is_str = 0;
+
+	/* FIXME: use MLX5_GET rather than mlx5_qp_context manual struct */
+	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);
+
+	switch (index) {
+	case QP_PID:
+		param = qp->pid;
+		break;
+	case QP_STATE:
+		param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
+		*is_str = 1;
+		break;
+	case QP_XPORT:
+		param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
+		*is_str = 1;
+		break;
+	case QP_MTU:
+		switch (ctx->mtu_msgmax >> 5) {
+		case IB_MTU_256:
+			param = 256;
+			break;
+		case IB_MTU_512:
+			param = 512;
+			break;
+		case IB_MTU_1024:
+			param = 1024;
+			break;
+		case IB_MTU_2048:
+			param = 2048;
+			break;
+		case IB_MTU_4096:
+			param = 4096;
+			break;
+		default:
+			param = 0;
+		}
+		break;
+	case QP_N_RECV:
+		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
+		break;
+	case QP_RECV_SZ:
+		param = 1 << ((ctx->rq_size_stride & 7) + 4);
+		break;
+	case QP_N_SEND:
+		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
+		if (!no_sq)
+			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
+		else
+			param = 0;
+		break;
+	case QP_LOG_PG_SZ:
+		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
+		param += 12;
+		break;
+	case QP_RQPN:
+		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
+		break;
+	}
+
+out:
+	kfree(out);
+	return param;
+}
+
+static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+			 int index)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
+	u64 param = 0;
+	void *ctx;
+	u32 *out;
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return param;
+
+	err = mlx5_core_eq_query(dev, eq, out, outlen);
+	if (err) {
+		mlx5_core_warn(dev, "failed to query eq\n");
+		goto out;
+	}
+	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);
+
+	switch (index) {
+	case EQ_NUM_EQES:
+		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
+		break;
+	case EQ_INTR:
+		param = MLX5_GET(eqc, ctx, intr);
+		break;
+	case EQ_LOG_PG_SZ:
+		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
+		break;
+	}
+
+out:
+	kfree(out);
+	return param;
+}
+
+static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			 int index)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
+	u64 param = 0;
+	void *ctx;
+	u32 *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return param;
+
+	err = mlx5_core_query_cq(dev, cq, out, outlen);
+	if (err) {
+		mlx5_core_warn(dev, "failed to query cq\n");
+		goto out;
+	}
+	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);
+
+	switch (index) {
+	case CQ_PID:
+		param = cq->pid;
+		break;
+	case CQ_NUM_CQES:
+		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
+		break;
+	case CQ_LOG_PG_SZ:
+		param = MLX5_GET(cqc, ctx, log_page_size);
+		break;
+	}
+
+out:
+	kvfree(out);
+	return param;
+}
+
+static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct mlx5_field_desc *desc;
+	struct mlx5_rsc_debug *d;
+	char tbuf[18];
+	int is_str = 0;
+	u64 field;
+	int ret;
+
+	if (*pos)
+		return 0;
+
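+	/*
+	 * Recover the enclosing mlx5_rsc_debug: private_data points at one
+	 * entry of its trailing fields[] array, so step back desc->i entries
+	 * to fields[0] and then over the struct header itself.
+	 */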
+	desc = filp->private_data;
+	d = (void *)(desc - desc->i) - sizeof(*d);
+	switch (d->type) {
+	case MLX5_DBG_RSC_QP:
+		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
+		break;
+
+	case MLX5_DBG_RSC_EQ:
+		field = eq_read_field(d->dev, d->object, desc->i);
+		break;
+
+	case MLX5_DBG_RSC_CQ:
+		field = cq_read_field(d->dev, d->object, desc->i);
+		break;
+
+	default:
+		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
+		return -EINVAL;
+	}
+
+	if (is_str)
+		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
+	else
+		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);
+
+	if (ret > 0) {
+		if (copy_to_user(buf, tbuf, ret))
+			return -EFAULT;
+	}
+
+	*pos += ret;
+	return ret;
+}
+
+static const struct file_operations fops = {
+	.owner	= THIS_MODULE,
+	.open	= simple_open,
+	.read	= dbg_read,
+};
+
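+/*
+ * Create a debugfs directory named "0x<rsn>" under @root with one file
+ * per field; each file's private data is its own mlx5_field_desc.
+ */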
+static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
+			struct dentry *root, struct mlx5_rsc_debug **dbg,
+			int rsn, char **field, int nfile, void *data)
+{
+	struct mlx5_rsc_debug *d;
+	char resn[32];
+	int err;
+	int i;
+
+	d = kzalloc(sizeof(*d) + nfile * sizeof(d->fields[0]), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	d->dev = dev;
+	d->object = data;
+	d->type = type;
+	sprintf(resn, "0x%x", rsn);
+	d->root = debugfs_create_dir(resn,  root);
+	if (!d->root) {
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	for (i = 0; i < nfile; i++) {
+		d->fields[i].i = i;
+		d->fields[i].dent = debugfs_create_file(field[i], 0400,
+							d->root, &d->fields[i],
+							&fops);
+		if (!d->fields[i].dent) {
+			err = -ENOMEM;
+			goto out_rem;
+		}
+	}
+	*dbg = d;
+
+	return 0;
+out_rem:
+	debugfs_remove_recursive(d->root);
+
+out_free:
+	kfree(d);
+	return err;
+}
+
+static void rem_res_tree(struct mlx5_rsc_debug *d)
+{
+	debugfs_remove_recursive(d->root);
+	kfree(d);
+}
+
+int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
+{
+	int err;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
+			   &qp->dbg, qp->qpn, qp_fields,
+			   ARRAY_SIZE(qp_fields), qp);
+	if (err)
+		qp->dbg = NULL;
+
+	return err;
+}
+
+void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	if (qp->dbg)
+		rem_res_tree(qp->dbg);
+}
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+	int err;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
+			   &eq->dbg, eq->eqn, eq_fields,
+			   ARRAY_SIZE(eq_fields), eq);
+	if (err)
+		eq->dbg = NULL;
+
+	return err;
+}
+
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	if (eq->dbg)
+		rem_res_tree(eq->dbg);
+}
+
+int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
+{
+	int err;
+
+	if (!mlx5_debugfs_root)
+		return 0;
+
+	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
+			   &cq->dbg, cq->cqn, cq_fields,
+			   ARRAY_SIZE(cq_fields), cq);
+	if (err)
+		cq->dbg = NULL;
+
+	return err;
+}
+
+void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	if (cq->dbg)
+		rem_res_tree(cq->dbg);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/dev.c b/drivers/net/ethernet/mellanox/mlx5/dev.c
new file mode 100644
index 0000000..3d43b1e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/dev.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "mlx5.h"
+
+static LIST_HEAD(intf_list);
+static LIST_HEAD(mlx5_dev_list);
+/* protects both intf_list and mlx5_dev_list */
+static DEFINE_MUTEX(mlx5_intf_mutex);
+
+struct mlx5_device_context {
+	struct list_head	list;
+	struct mlx5_interface  *intf;
+	void		       *context;
+	unsigned long		state;
+};
+
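+/*
+ * dev_ctx->state bits: ADDED is set once intf->add() has run for this
+ * device; ATTACHED additionally tracks attach/detach cycles for
+ * interfaces that implement those callbacks.
+ */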
+enum {
+	MLX5_INTERFACE_ADDED,
+	MLX5_INTERFACE_ATTACHED,
+};
+
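+/*
+ * Instantiate @intf on a device.  mlx5_lag_intf_add() filters out
+ * interfaces that should not be instantiated on this port, e.g. when it
+ * is a LAG slave.
+ */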
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	if (!mlx5_lag_intf_add(intf, priv))
+		return;
+
+	dev_ctx = kzalloc(sizeof(*dev_ctx), GFP_KERNEL);
+	if (!dev_ctx)
+		return;
+
+	dev_ctx->intf = intf;
+	dev_ctx->context = intf->add(dev);
+	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+	if (intf->attach)
+		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
+	if (dev_ctx->context) {
+		spin_lock_irq(&priv->ctx_lock);
+		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		if (dev_ctx->intf->pfault) {
+			if (priv->pfault) {
+				mlx5_core_err(dev, "multiple page fault handlers not supported");
+			} else {
+				priv->pfault_ctx = dev_ctx->context;
+				priv->pfault = dev_ctx->intf->pfault;
+			}
+		}
+#endif
+		spin_unlock_irq(&priv->ctx_lock);
+	} else {
+		kfree(dev_ctx);
+	}
+}
+
+static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf,
+						   struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf == intf)
+			return dev_ctx;
+	return NULL;
+}
+
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = mlx5_get_device(intf, priv);
+	if (!dev_ctx)
+		return;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	spin_lock_irq(&priv->ctx_lock);
+	if (priv->pfault == dev_ctx->intf->pfault)
+		priv->pfault = NULL;
+	spin_unlock_irq(&priv->ctx_lock);
+
+	synchronize_srcu(&priv->pfault_srcu);
+#endif
+
+	spin_lock_irq(&priv->ctx_lock);
+	list_del(&dev_ctx->list);
+	spin_unlock_irq(&priv->ctx_lock);
+
+	if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+		intf->remove(dev, dev_ctx->context);
+
+	kfree(dev_ctx);
+}
+
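+/*
+ * (Re)activate an interface on a device: use the lightweight attach()
+ * callback when the interface provides one, otherwise fall back to a
+ * full add().
+ */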
+static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = mlx5_get_device(intf, priv);
+	if (!dev_ctx)
+		return;
+
+	if (intf->attach) {
+		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+			return;
+		intf->attach(dev, dev_ctx->context);
+		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+	} else {
+		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+			return;
+		dev_ctx->context = intf->add(dev);
+		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+	}
+}
+
+void mlx5_attach_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_attach_interface(intf, priv);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
+static void mlx5_detach_interface(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = mlx5_get_device(intf, priv);
+	if (!dev_ctx)
+		return;
+
+	if (intf->detach) {
+		if (!test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
+			return;
+		intf->detach(dev, dev_ctx->context);
+		clear_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+	} else {
+		if (!test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
+			return;
+		intf->remove(dev, dev_ctx->context);
+		clear_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+	}
+}
+
+void mlx5_detach_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_detach_interface(intf, priv);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
+bool mlx5_device_registered(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv;
+	bool found = false;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+		if (priv == &dev->priv)
+			found = true;
+	mutex_unlock(&mlx5_intf_mutex);
+
+	return found;
+}
+
+int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_add_tail(&priv->dev_list, &mlx5_dev_list);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&mlx5_intf_mutex);
+
+	return 0;
+}
+
+void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_remove_device(intf, priv);
+	list_del(&priv->dev_list);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	if (!intf->add || !intf->remove)
+		return -EINVAL;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_add_tail(&intf->list, &intf_list);
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&mlx5_intf_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	mutex_lock(&mlx5_intf_mutex);
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list)
+		mlx5_remove_device(intf, priv);
+	list_del(&intf->list);
+	mutex_unlock(&mlx5_intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
+{
+	struct mlx5_priv *priv = &mdev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+	void *result = NULL;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
+		if ((dev_ctx->intf->protocol == protocol) &&
+		    dev_ctx->intf->get_dev) {
+			result = dev_ctx->intf->get_dev(dev_ctx->context);
+			break;
+		}
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(mlx5_get_protocol_dev);
+
+/* Must be called with intf_mutex held */
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_add_device(intf, &dev->priv);
+			break;
+		}
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
+{
+	struct mlx5_interface *intf;
+
+	list_for_each_entry(intf, &intf_list, list)
+		if (intf->protocol == protocol) {
+			mlx5_remove_device(intf, &dev->priv);
+			break;
+		}
+}
+
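+/* Build an ID from the PCI bus and slot only, so all functions on the same
+ * physical device map to the same ID.
+ */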
+static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+{
+	return (u16)((dev->pdev->bus->number << 8) |
+		     PCI_SLOT(dev->pdev->devfn));
+}
+
+/* Must be called with intf_mutex held */
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
+{
+	u16 pci_id = mlx5_gen_pci_id(dev);
+	struct mlx5_core_dev *res = NULL;
+	struct mlx5_core_dev *tmp_dev;
+	struct mlx5_priv *priv;
+
+	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
+		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
+		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
+			res = tmp_dev;
+			break;
+		}
+	}
+
+	return res;
+}
+
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+		     unsigned long param)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf->event)
+			dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+			  struct mlx5_pagefault *pfault)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int srcu_idx;
+
+	srcu_idx = srcu_read_lock(&priv->pfault_srcu);
+	if (priv->pfault)
+		priv->pfault(dev, priv->pfault_ctx, pfault);
+	srcu_read_unlock(&priv->pfault_srcu, srcu_idx);
+}
+#endif
+
+void mlx5_dev_list_lock(void)
+{
+	mutex_lock(&mlx5_intf_mutex);
+}
+
+void mlx5_dev_list_unlock(void)
+{
+	mutex_unlock(&mlx5_intf_mutex);
+}
+
+int mlx5_dev_list_trylock(void)
+{
+	return mutex_trylock(&mlx5_intf_mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/arfs.c b/drivers/net/ethernet/mellanox/mlx5/en/arfs.c
new file mode 100644
index 0000000..6d3bcb8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/arfs.c
@@ -0,0 +1,749 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include "lib/fs.h"
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en/en.h"
+
+struct arfs_tuple {
+	__be16 etype;
+	u8     ip_proto;
+	union {
+		__be32 src_ipv4;
+		struct in6_addr src_ipv6;
+	};
+	union {
+		__be32 dst_ipv4;
+		struct in6_addr dst_ipv6;
+	};
+	__be16 src_port;
+	__be16 dst_port;
+};
+
+struct arfs_rule {
+	struct mlx5e_priv	*priv;
+	struct work_struct      arfs_work;
+	struct mlx5_flow_handle *rule;
+	struct hlist_node	hlist;
+	int			rxq;
+	/* Flow ID passed to ndo_rx_flow_steer */
+	int			flow_id;
+	/* Filter ID returned by ndo_rx_flow_steer */
+	int			filter_id;
+	struct arfs_tuple	tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+	for (i = 0; i < ARFS_NUM_TYPES; i++) \
+		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+	for (j = 0; j < ARFS_HASH_SIZE; j++) \
+		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
+
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+	switch (type) {
+	case ARFS_IPV4_TCP:
+		return MLX5E_TT_IPV4_TCP;
+	case ARFS_IPV4_UDP:
+		return MLX5E_TT_IPV4_UDP;
+	case ARFS_IPV6_TCP:
+		return MLX5E_TT_IPV6_TCP;
+	case ARFS_IPV6_UDP:
+		return MLX5E_TT_IPV6_UDP;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	struct mlx5e_tir *tir = priv->indir_tir;
+	int err = 0;
+	int tt;
+	int i;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		dest.tir_num = tir[i].tirn;
+		tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to bypass the aRFS tables */
+		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+						   &dest, NULL);
+		if (err) {
+			netdev_err(priv->netdev,
+				   "%s: modify ttc destination failed\n",
+				   __func__);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+	arfs_del_rules(priv);
+
+	return arfs_disable(priv);
+}
+
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	int err = 0;
+	int tt;
+	int i;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+		tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to point on the aRFS FTs */
+		err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+						   &dest, NULL);
+		if (err) {
+			netdev_err(priv->netdev,
+				   "%s: modify ttc destination failed err=%d\n",
+				   __func__, err);
+			arfs_disable(priv);
+			return err;
+		}
+	}
+	return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+	mlx5_del_flow_rules(arfs_t->default_rule);
+	mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+	int i;
+
+	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+		return;
+
+	arfs_del_rules(priv);
+	destroy_workqueue(priv->fs.arfs.wq);
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+			arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+	}
+}
+
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+				 enum arfs_type type)
+{
+	struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+	struct mlx5_flow_act flow_act = {
+		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+		.encap_id = 0,
+	};
+	struct mlx5_flow_destination dest;
+	struct mlx5e_tir *tir = priv->indir_tir;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	switch (type) {
+	case ARFS_IPV4_TCP:
+		dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
+		break;
+	case ARFS_IPV4_UDP:
+		dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
+		break;
+	case ARFS_IPV6_TCP:
+		dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
+		break;
+	case ARFS_IPV6_UDP:
+		dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
+						   &flow_act,
+						   &dest, 1);
+	if (IS_ERR(arfs_t->default_rule)) {
+		err = PTR_ERR(arfs_t->default_rule);
+		arfs_t->default_rule = NULL;
+		netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+			   __func__, type);
+	}
+out:
+	kvfree(spec);
+	return err;
+}
+
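+/* Group 1 holds the per-flow match rules; group 2 is a single wildcard entry
+ * for the default (miss) rule.
+ */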
+#define MLX5E_ARFS_NUM_GROUPS	2
+#define MLX5E_ARFS_GROUP1_SIZE	BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
+				 MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+			      enum  arfs_type type)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	void *outer_headers_c;
+	int ix = 0;
+	u32 *in;
+	int err;
+	u8 *mc;
+
+	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+			sizeof(*ft->g), GFP_KERNEL);
+	in = mlx5_vzalloc(inlen);
+	if (!in || !ft->g) {
+		kvfree(ft->g);
+		kvfree(in);
+		return -ENOMEM;
+	}
+
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+				       outer_headers);
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+	switch (type) {
+	case ARFS_IPV4_TCP:
+	case ARFS_IPV6_TCP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+		break;
+	case ARFS_IPV4_UDP:
+	case ARFS_IPV6_UDP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	switch (type) {
+	case ARFS_IPV4_TCP:
+	case ARFS_IPV4_UDP:
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
+		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+		break;
+	case ARFS_IPV6_TCP:
+	case ARFS_IPV6_UDP:
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       0xff, 16);
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff, 16);
+		break;
+	default:
+		err = -EINVAL;
+		goto out;
+	}
+
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_ARFS_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_ARFS_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	kvfree(in);
+	return 0;
+
+err:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+out:
+	kvfree(in);
+
+	return err;
+}
+
+static int arfs_create_table(struct mlx5e_priv *priv,
+			     enum arfs_type type)
+{
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+	int err;
+
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL, 0);
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+
+	err = arfs_create_groups(ft, type);
+	if (err)
+		goto err;
+
+	err = arfs_add_default_rule(priv, type);
+	if (err)
+		goto err;
+
+	return 0;
+err:
+	mlx5e_destroy_flow_table(ft);
+	return err;
+}
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	int err = 0;
+	int i;
+
+	if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+		return 0;
+
+	spin_lock_init(&priv->fs.arfs.arfs_lock);
+	INIT_LIST_HEAD(&priv->fs.arfs.rules);
+	priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+	if (!priv->fs.arfs.wq)
+		return -ENOMEM;
+
+	for (i = 0; i < ARFS_NUM_TYPES; i++) {
+		err = arfs_create_table(priv, i);
+		if (err)
+			goto err;
+	}
+	return 0;
+err:
+	mlx5e_arfs_destroy_tables(priv);
+	return err;
+}
+
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
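+/* Collect expiry candidates under the spinlock, then delete them outside it:
+ * deleting a flow rule issues firmware commands and may sleep.
+ */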
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+	struct arfs_rule *arfs_rule;
+	struct hlist_node *htmp;
+	int quota = 0;
+	int i;
+	int j;
+
+	HLIST_HEAD(del_list);
+	spin_lock_bh(&priv->fs.arfs.arfs_lock);
+	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+			break;
+		if (!work_pending(&arfs_rule->arfs_work) &&
+		    rps_may_expire_flow(priv->netdev,
+					arfs_rule->rxq, arfs_rule->flow_id,
+					arfs_rule->filter_id)) {
+			hlist_del_init(&arfs_rule->hlist);
+			hlist_add_head(&arfs_rule->hlist, &del_list);
+		}
+	}
+	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+		if (arfs_rule->rule)
+			mlx5_del_flow_rules(arfs_rule->rule);
+		hlist_del(&arfs_rule->hlist);
+		kfree(arfs_rule);
+	}
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+	struct hlist_node *htmp;
+	struct arfs_rule *rule;
+	int i;
+	int j;
+
+	HLIST_HEAD(del_list);
+	spin_lock_bh(&priv->fs.arfs.arfs_lock);
+	mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+		hlist_del_init(&rule->hlist);
+		hlist_add_head(&rule->hlist, &del_list);
+	}
+	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+		cancel_work_sync(&rule->arfs_work);
+		if (rule->rule)
+			mlx5_del_flow_rules(rule->rule);
+		hlist_del(&rule->hlist);
+		kfree(rule);
+	}
+}
+
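+/* Pick a hash bucket from the L4 source/destination port pair. */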
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+		 __be16 dst_port)
+{
+	unsigned long l;
+	int bucket_idx;
+
+	l = (__force unsigned long)src_port |
+	    ((__force unsigned long)dst_port << 2);
+
+	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+	return &arfs_t->rules_hash[bucket_idx];
+}
+
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IP)) ?
+		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+					 u8 ip_proto, __be16 etype)
+{
+	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+		return &arfs->arfs_tables[ARFS_IPV4_TCP];
+	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+		return &arfs->arfs_tables[ARFS_IPV4_UDP];
+	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+		return &arfs->arfs_tables[ARFS_IPV6_TCP];
+	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+		return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+	return NULL;
+}
+
+static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
+					      struct arfs_rule *arfs_rule)
+{
+	struct mlx5_flow_act flow_act = {
+		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+		.encap_id = 0,
+	};
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct arfs_tuple *tuple = &arfs_rule->tuple;
+	struct mlx5_flow_handle *rule = NULL;
+	struct mlx5_flow_destination dest;
+	struct arfs_table *arfs_table;
+	struct mlx5_flow_spec *spec;
+	struct mlx5_flow_table *ft;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+			 outer_headers.ethertype);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
+		 ntohs(tuple->etype));
+	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+	if (!arfs_table) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	ft = arfs_table->ft.t;
+	if (tuple->ip_proto == IPPROTO_TCP) {
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.tcp_dport);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.tcp_sport);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
+			 ntohs(tuple->dst_port));
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
+			 ntohs(tuple->src_port));
+	} else {
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.udp_dport);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.udp_sport);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
+			 ntohs(tuple->dst_port));
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
+			 ntohs(tuple->src_port));
+	}
+	if (tuple->etype == htons(ETH_P_IP)) {
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       &tuple->src_ipv4,
+		       4);
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       &tuple->dst_ipv4,
+		       4);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+	} else {
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       &tuple->src_ipv6,
+		       16);
+		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       &tuple->dst_ipv6,
+		       16);
+		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       0xff,
+		       16);
+		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       0xff,
+		       16);
+	}
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: add rule (filter id=%d, rq idx=%d) failed, err=%d\n",
+			   __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+	}
+
+out:
+	kvfree(spec);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+				struct mlx5_flow_handle *rule, u16 rxq)
+{
+	struct mlx5_flow_destination dst;
+	int err = 0;
+
+	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	dst.tir_num = priv->direct_tir[rxq].tirn;
+	err = mlx5_modify_rule_destination(rule, &dst, NULL);
+	if (err)
+		netdev_warn(priv->netdev,
+			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
+}
+
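+/* Work handler: add the steering rule for a new flow, or retarget an
+ * existing rule to the RQ of the CPU that last saw the flow.
+ */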
+static void arfs_handle_work(struct work_struct *work)
+{
+	struct arfs_rule *arfs_rule = container_of(work,
+						   struct arfs_rule,
+						   arfs_work);
+	struct mlx5e_priv *priv = arfs_rule->priv;
+	struct mlx5_flow_handle *rule;
+
+	mutex_lock(&priv->state_lock);
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		spin_lock_bh(&priv->fs.arfs.arfs_lock);
+		hlist_del(&arfs_rule->hlist);
+		spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+		mutex_unlock(&priv->state_lock);
+		kfree(arfs_rule);
+		goto out;
+	}
+	mutex_unlock(&priv->state_lock);
+
+	if (!arfs_rule->rule) {
+		rule = arfs_add_rule(priv, arfs_rule);
+		if (IS_ERR(rule))
+			goto out;
+		arfs_rule->rule = rule;
+	} else {
+		arfs_modify_rule_rq(priv, arfs_rule->rule,
+				    arfs_rule->rxq);
+	}
+out:
+	arfs_may_expire_flow(priv);
+}
+
+/* return L4 destination port from ip4/6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+	char *transport_header;
+
+	transport_header = skb_transport_header(skb);
+	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+		return ((struct tcphdr *)transport_header)->dest;
+	return ((struct udphdr *)transport_header)->dest;
+}
+
+/* return L4 source port from ip4/6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+	char *transport_header;
+
+	transport_header = skb_transport_header(skb);
+	if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+		return ((struct tcphdr *)transport_header)->source;
+	return ((struct udphdr *)transport_header)->source;
+}
+
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+					 struct arfs_table *arfs_t,
+					 const struct sk_buff *skb,
+					 u16 rxq, u32 flow_id)
+{
+	struct arfs_rule *rule;
+	struct arfs_tuple *tuple;
+
+	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+	if (!rule)
+		return NULL;
+
+	rule->priv = priv;
+	rule->rxq = rxq;
+	INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+	tuple = &rule->tuple;
+	tuple->etype = skb->protocol;
+	if (tuple->etype == htons(ETH_P_IP)) {
+		tuple->src_ipv4 = ip_hdr(skb)->saddr;
+		tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+	} else {
+		memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		       sizeof(struct in6_addr));
+		memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		       sizeof(struct in6_addr));
+	}
+	tuple->ip_proto = arfs_get_ip_proto(skb);
+	tuple->src_port = arfs_get_src_port(skb);
+	tuple->dst_port = arfs_get_dst_port(skb);
+
+	rule->flow_id = flow_id;
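+	/* RPS_NO_FILTER is the sentinel ndo_rx_flow_steer uses to signal
+	 * failure, so wrap filter IDs before they reach it.
+	 */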
+	rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+	hlist_add_head(&rule->hlist,
+		       arfs_hash_bucket(arfs_t, tuple->src_port,
+					tuple->dst_port));
+	return rule;
+}
+
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+			 const struct sk_buff *skb)
+{
+	if (tuple->etype == htons(ETH_P_IP) &&
+	    tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+	    tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+		return true;
+	if (tuple->etype == htons(ETH_P_IPV6) &&
+	    (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+		     sizeof(struct in6_addr))) &&
+	    (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+		     sizeof(struct in6_addr))))
+		return true;
+	return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+					const struct sk_buff *skb)
+{
+	struct arfs_rule *arfs_rule;
+	struct hlist_head *head;
+	__be16 src_port = arfs_get_src_port(skb);
+	__be16 dst_port = arfs_get_dst_port(skb);
+
+	head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+	hlist_for_each_entry(arfs_rule, head, hlist) {
+		if (arfs_rule->tuple.src_port == src_port &&
+		    arfs_rule->tuple.dst_port == dst_port &&
+		    arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+			return arfs_rule;
+		}
+	}
+
+	return NULL;
+}
+
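+/* ndo_rx_flow_steer callback: look up or allocate a rule for this flow and
+ * defer the steering-table update to the aRFS workqueue, since flow rules
+ * cannot be added from this atomic context.
+ */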
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			u16 rxq_index, u32 flow_id)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+	struct arfs_table *arfs_t;
+	struct arfs_rule *arfs_rule;
+
+	if (skb->protocol != htons(ETH_P_IP) &&
+	    skb->protocol != htons(ETH_P_IPV6))
+		return -EPROTONOSUPPORT;
+
+	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+	if (!arfs_t)
+		return -EPROTONOSUPPORT;
+
+	spin_lock_bh(&arfs->arfs_lock);
+	arfs_rule = arfs_find_rule(arfs_t, skb);
+	if (arfs_rule) {
+		if (arfs_rule->rxq == rxq_index) {
+			spin_unlock_bh(&arfs->arfs_lock);
+			return arfs_rule->filter_id;
+		}
+		arfs_rule->rxq = rxq_index;
+	} else {
+		arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+					    rxq_index, flow_id);
+		if (!arfs_rule) {
+			spin_unlock_bh(&arfs->arfs_lock);
+			return -ENOMEM;
+		}
+	}
+	queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+	spin_unlock_bh(&arfs->arfs_lock);
+	return arfs_rule->filter_id;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/clock.c b/drivers/net/ethernet/mellanox/mlx5/en/clock.c
new file mode 100644
index 0000000..a72faff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/clock.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/clocksource.h>
+#include "en/en.h"
+
+enum {
+	MLX5E_CYCLES_SHIFT	= 23
+};
+
+void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
+			struct skb_shared_hwtstamps *hwts)
+{
+	u64 nsec;
+
+	read_lock(&tstamp->lock);
+	nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
+	read_unlock(&tstamp->lock);
+
+	hwts->hwtstamp = ns_to_ktime(nsec);
+}
+
+static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
+{
+	struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
+						   cycles);
+
+	return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
+}
+
+static void mlx5e_timestamp_overflow(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
+						   overflow_work);
+	unsigned long flags;
+
+	write_lock_irqsave(&tstamp->lock, flags);
+	timecounter_read(&tstamp->clock);
+	write_unlock_irqrestore(&tstamp->lock, flags);
+	schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
+}
+
+int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config config;
+
+	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* TX HW timestamp */
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	/* RX HW timestamp */
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		/* Reset CQE compression to Admin default */
+		mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		/* Disable CQE compression */
+		netdev_warn(dev, "Disabling cqe compression\n");
+		mlx5e_modify_rx_cqe_compression(priv, false);
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
+
+	return copy_to_user(ifr->ifr_data, &config,
+			    sizeof(config)) ? -EFAULT : 0;
+}
+
+int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
+
+	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+		return -EOPNOTSUPP;
+
+	return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+}
+
+static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
+			     const struct timespec64 *ts)
+{
+	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+						   ptp_info);
+	u64 ns = timespec64_to_ns(ts);
+	unsigned long flags;
+
+	write_lock_irqsave(&tstamp->lock, flags);
+	timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
+	write_unlock_irqrestore(&tstamp->lock, flags);
+
+	return 0;
+}
+
+static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
+			     struct timespec64 *ts)
+{
+	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+						   ptp_info);
+	u64 ns;
+	unsigned long flags;
+
+	write_lock_irqsave(&tstamp->lock, flags);
+	ns = timecounter_read(&tstamp->clock);
+	write_unlock_irqrestore(&tstamp->lock, flags);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+						   ptp_info);
+	unsigned long flags;
+
+	write_lock_irqsave(&tstamp->lock, flags);
+	timecounter_adjtime(&tstamp->clock, delta);
+	write_unlock_irqrestore(&tstamp->lock, flags);
+
+	return 0;
+}
+
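+/* Frequency adjustment: scale the nominal multiplier by |delta| parts per
+ * billion, i.e. diff = nominal_c_mult * |delta| / 10^9.
+ */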
+static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+	u64 adj;
+	u32 diff;
+	unsigned long flags;
+	int neg_adj = 0;
+	struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
+						  ptp_info);
+
+	if (delta < 0) {
+		neg_adj = 1;
+		delta = -delta;
+	}
+
+	adj = tstamp->nominal_c_mult;
+	adj *= delta;
+	diff = div_u64(adj, 1000000000ULL);
+
+	write_lock_irqsave(&tstamp->lock, flags);
+	timecounter_read(&tstamp->clock);
+	tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
+					tstamp->nominal_c_mult + diff;
+	write_unlock_irqrestore(&tstamp->lock, flags);
+
+	return 0;
+}
+
+static const struct ptp_clock_info mlx5e_ptp_clock_info = {
+	.owner		= THIS_MODULE,
+	.max_adj	= 100000000,
+	.n_alarm	= 0,
+	.n_ext_ts	= 0,
+	.n_per_out	= 0,
+	.n_pins		= 0,
+	.pps		= 0,
+	.adjfreq	= mlx5e_ptp_adjfreq,
+	.adjtime	= mlx5e_ptp_adjtime,
+	.gettime64	= mlx5e_ptp_gettime,
+	.settime64	= mlx5e_ptp_settime,
+	.enable		= NULL,
+};
+
+static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
+{
+	tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
+	tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
+void mlx5e_timestamp_init(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+	u64 ns;
+	u64 frac = 0;
+	u32 dev_freq;
+
+	mlx5e_timestamp_init_config(tstamp);
+	dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
+	if (!dev_freq) {
+		mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+		return;
+	}
+	rwlock_init(&tstamp->lock);
+	tstamp->cycles.read = mlx5e_read_internal_timer;
+	tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
+	tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
+						   tstamp->cycles.shift);
+	tstamp->nominal_c_mult = tstamp->cycles.mult;
+	tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
+	tstamp->mdev = priv->mdev;
+
+	timecounter_init(&tstamp->clock, &tstamp->cycles,
+			 ktime_to_ns(ktime_get_real()));
+
+	/* Calculate the period for the overflow watchdog, to make sure the
+	 * counter is checked at least once every wrap-around.
+	 */
+	ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
+				 frac, &frac);
+	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	tstamp->overflow_period = ns;
+
+	INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
+	if (tstamp->overflow_period)
+		schedule_delayed_work(&tstamp->overflow_work, 0);
+	else
+		mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
+
+	/* Configure the PHC */
+	tstamp->ptp_info = mlx5e_ptp_clock_info;
+	snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
+
+	tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
+					 &priv->mdev->pdev->dev);
+	if (IS_ERR(tstamp->ptp)) {
+		mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
+			       PTR_ERR(tstamp->ptp));
+		tstamp->ptp = NULL;
+	}
+}
+
+void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tstamp *tstamp = &priv->tstamp;
+
+	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+		return;
+
+	if (priv->tstamp.ptp) {
+		ptp_clock_unregister(priv->tstamp.ptp);
+		priv->tstamp.ptp = NULL;
+	}
+
+	cancel_delayed_work_sync(&tstamp->overflow_work);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/common.c b/drivers/net/ethernet/mellanox/mlx5/en/common.c
new file mode 100644
index 0000000..bf4f824
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/common.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en/en.h"
+
+/* mlx5e global resources should be placed in this file.
+ * Global resources are common to all the netdevices created on the same NIC.
+ */
+
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+		     struct mlx5e_tir *tir, u32 *in, int inlen)
+{
+	int err;
+
+	err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+	if (err)
+		return err;
+
+	list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+
+	return 0;
+}
+
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+		       struct mlx5e_tir *tir)
+{
+	mlx5_core_destroy_tir(mdev, tir->tirn);
+	list_del(&tir->list);
+}
+
+static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+			     struct mlx5_core_mkey *mkey)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	void *mkc;
+	u32 *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_PA);
+	MLX5_SET(mkc, mkc, lw, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+
+	MLX5_SET(mkc, mkc, pd, pdn);
+	MLX5_SET(mkc, mkc, length64, 1);
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+		/* Modify ttc rules destination to point to the aRFS flow tables */
+	err = mlx5_core_create_mkey(mdev, mkey, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+{
+	struct mlx5e_resources *res = &mdev->mlx5e_res;
+	int err;
+
+	err = mlx5_core_alloc_pd(mdev, &res->pdn);
+	if (err) {
+		mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
+	if (err) {
+		mlx5_core_err(mdev, "alloc td failed, %d\n", err);
+		goto err_dealloc_pd;
+	}
+
+	err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
+	if (err) {
+		mlx5_core_err(mdev, "create mkey failed, %d\n", err);
+		goto err_dealloc_transport_domain;
+	}
+
+	INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+
+	return 0;
+
+err_dealloc_transport_domain:
+	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+err_dealloc_pd:
+	mlx5_core_dealloc_pd(mdev, res->pdn);
+	return err;
+}
+
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
+{
+	struct mlx5e_resources *res = &mdev->mlx5e_res;
+
+	mlx5_core_destroy_mkey(mdev, &res->mkey);
+	mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+	mlx5_core_dealloc_pd(mdev, res->pdn);
+}
+
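+/* Update the self-loopback blocking configuration on every TIR created on
+ * this device.
+ */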
+int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
+				     bool enable_uc_lb)
+{
+	struct mlx5e_tir *tir;
+	void *in;
+	int inlen;
+	int err = 0;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	if (enable_uc_lb)
+		MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
+			 MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+
+	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+	list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+		err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+		if (err)
+			goto out;
+	}
+
+out:
+	kvfree(in);
+
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/en/dcbnl.c
new file mode 100644
index 0000000..4a5a89e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/dcbnl.c
@@ -0,0 +1,751 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include "en/en.h"
+
+#define MLX5E_MAX_PRIORITY 8
+
+#define MLX5E_100MB (100000)
+#define MLX5E_1GB   (1000000)
+
+#define MLX5E_CEE_STATE_UP    1
+#define MLX5E_CEE_STATE_DOWN  0
+
+/* If dcbx mode is non-host, set the dcbx mode to host. */
+static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
+				     enum mlx5_dcbx_oper_mode mode)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 param[MLX5_ST_SZ_DW(dcbx_param)];
+	int err;
+
+	err = mlx5_query_port_dcbx_param(mdev, param);
+	if (err)
+		return err;
+
+	MLX5_SET(dcbx_param, param, version_admin, mode);
+	if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
+		MLX5_SET(dcbx_param, param, willing_admin, 1);
+
+	return mlx5_set_port_dcbx_param(mdev, param);
+}
+
+static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
+{
+	struct mlx5e_dcbx *dcbx = &priv->dcbx;
+	int err;
+
+	if (!MLX5_CAP_GEN(priv->mdev, dcbx))
+		return 0;
+
+	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+		return 0;
+
+	err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
+	if (err)
+		return err;
+
+	dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
+	return 0;
+}
+
+static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
+				   struct ieee_ets *ets)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err = 0;
+	int i;
+
+	if (!MLX5_CAP_GEN(priv->mdev, ets))
+		return -ENOTSUPP;
+
+	ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+	for (i = 0; i < ets->ets_cap; i++) {
+		err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
+		if (err)
+			return err;
+	}
+
+	for (i = 0; i < ets->ets_cap; i++) {
+		err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
+		if (err)
+			return err;
+		if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
+			priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+	}
+
+	memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
+
+	return err;
+}
+
+enum {
+	MLX5E_VENDOR_TC_GROUP_NUM = 7,
+	MLX5E_ETS_TC_GROUP_NUM    = 0,
+};
+
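+/* Map each TC to a hardware TC group: ETS TCs share group 0, strict-priority
+ * TCs get ascending groups (starting above the ETS group when one is in use),
+ * and vendor TCs go to the highest group.
+ */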
+static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
+{
+	bool any_tc_mapped_to_ets = false;
+	int strict_group;
+	int i;
+
+	for (i = 0; i <= max_tc; i++)
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+			any_tc_mapped_to_ets = true;
+
+	strict_group = any_tc_mapped_to_ets ? 1 : 0;
+
+	for (i = 0; i <= max_tc; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
+			break;
+		case IEEE_8021QAZ_TSA_STRICT:
+			tc_group[i] = strict_group++;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			tc_group[i] = MLX5E_ETS_TC_GROUP_NUM;
+			break;
+		}
+	}
+}
+
+static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
+				 u8 *tc_group, int max_tc)
+{
+	int i;
+
+	for (i = 0; i <= max_tc; i++) {
+		switch (ets->tc_tsa[i]) {
+		case IEEE_8021QAZ_TSA_VENDOR:
+			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+			break;
+		case IEEE_8021QAZ_TSA_STRICT:
+			tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+			break;
+		case IEEE_8021QAZ_TSA_ETS:
+			tc_tx_bw[i] = ets->tc_tx_bw[i];
+			break;
+		}
+	}
+}
+
+int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
+	u8 tc_group[IEEE_8021QAZ_MAX_TCS];
+	int max_tc = mlx5_max_tc(mdev);
+	int err;
+
+	mlx5e_build_tc_group(ets, tc_group, max_tc);
+	mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
+
+	err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
+	if (err)
+		return err;
+
+	err = mlx5_set_port_tc_group(mdev, tc_group);
+	if (err)
+		return err;
+
+	err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
+
+	if (err)
+		return err;
+
+	memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+	return err;
+}
+
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+				    struct ieee_ets *ets)
+{
+	int bw_sum = 0;
+	int i;
+
+	/* Validate Priority */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+			netdev_err(netdev,
+				   "Failed to validate ETS: priority value greater than max(%d)\n",
+				    MLX5E_MAX_PRIORITY);
+			return -EINVAL;
+		}
+	}
+
+	/* Validate Bandwidth Sum */
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+			if (!ets->tc_tx_bw[i]) {
+				netdev_err(netdev,
+					   "Failed to validate ETS: BW 0 is illegal\n");
+				return -EINVAL;
+			}
+
+			bw_sum += ets->tc_tx_bw[i];
+		}
+	}
+
+	if (bw_sum != 0 && bw_sum != 100) {
+		netdev_err(netdev,
+			   "Failed to validate ETS: BW sum is illegal\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
+				   struct ieee_ets *ets)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	if (!MLX5_CAP_GEN(priv->mdev, ets))
+		return -ENOTSUPP;
+
+	err = mlx5e_dbcnl_validate_ets(netdev, ets);
+	if (err)
+		return err;
+
+	err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	int i;
+
+	pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+		pfc->requests[i]    = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+		pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+	}
+
+	return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
+}
+
+static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+				   struct ieee_pfc *pfc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 curr_pfc_en;
+	int ret;
+
+	mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
+
+	if (pfc->pfc_en == curr_pfc_en)
+		return 0;
+
+	ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
+	mlx5_toggle_port_link(mdev);
+
+	return ret;
+}
+
+static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_dcbx *dcbx = &priv->dcbx;
+	u8 mode = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_VER_CEE;
+
+	if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
+		mode |= DCB_CAP_DCBX_HOST;
+
+	return mode;
+}
+
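+/* dcbnl convention: return 0 to accept the requested mode, non-zero to
+ * reject it. A zero mode on a dcbx-capable device hands control back to
+ * firmware.
+ */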
+static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_dcbx *dcbx = &priv->dcbx;
+
+	if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
+		if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
+			return 0;
+
+		/* set dcbx to fw controlled */
+		if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
+			dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+			return 0;
+		}
+
+		return 1;
+	}
+
+	if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+		return 1;
+
+	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+	    !(mode & DCB_CAP_DCBX_VER_CEE) ||
+	    !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+	    !(mode & DCB_CAP_DCBX_HOST))
+		return 1;
+
+	return 0;
+}
+
+static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
+				       struct ieee_maxrate *maxrate)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
+	int err;
+	int i;
+
+	err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
+	if (err)
+		return err;
+
+	memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
+
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		switch (max_bw_unit[i]) {
+		case MLX5_100_MBPS_UNIT:
+			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
+			break;
+		case MLX5_GBPS_UNIT:
+			maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
+			break;
+		case MLX5_BW_NO_LIMIT:
+			break;
+		default:
+			WARN(true, "unsupported BW unit");
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
+				       struct ieee_maxrate *maxrate)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
+	u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
+	__u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
+	int i;
+
+	memset(max_bw_value, 0, sizeof(max_bw_value));
+	memset(max_bw_unit, 0, sizeof(max_bw_unit));
+
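+	/* max_bw_value is an 8-bit field, so rates below upper_limit_mbps are
+	 * programmed in 100 Mbps units and anything larger in 1 Gbps units.
+	 */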
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		if (!maxrate->tc_maxrate[i]) {
+			max_bw_unit[i]  = MLX5_BW_NO_LIMIT;
+			continue;
+		}
+		if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
+			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
+						  MLX5E_100MB);
+			max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
+			max_bw_unit[i]  = MLX5_100_MBPS_UNIT;
+		} else {
+			max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
+						  MLX5E_1GB);
+			max_bw_unit[i]  = MLX5_GBPS_UNIT;
+		}
+	}
+
+	return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
+}
+
+static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct ieee_ets ets;
+	struct ieee_pfc pfc;
+	int err = -ENOTSUPP;
+	int i;
+
+	if (!MLX5_CAP_GEN(mdev, ets))
+		goto out;
+
+	memset(&ets, 0, sizeof(ets));
+	memset(&pfc, 0, sizeof(pfc));
+
+	ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
+	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+		ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
+		ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
+		ets.tc_tsa[i]   = IEEE_8021QAZ_TSA_ETS;
+		ets.prio_tc[i]  = cee_cfg->prio_to_pg_map[i];
+	}
+
+	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
+	if (err) {
+		netdev_err(netdev,
+			   "%s, Failed to validate ETS: %d\n", __func__, err);
+		goto out;
+	}
+
+	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+	if (err) {
+		netdev_err(netdev,
+			   "%s, Failed to set ETS: %d\n", __func__, err);
+		goto out;
+	}
+
+	/* Set PFC */
+	pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
+	if (!cee_cfg->pfc_enable)
+		pfc.pfc_en = 0;
+	else
+		for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
+			pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
+
+	err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
+	if (err) {
+		netdev_err(netdev,
+			   "%s, Failed to set PFC: %d\n", __func__, err);
+		goto out;
+	}
+out:
+	return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
+}
+
+static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
+{
+	return MLX5E_CEE_STATE_UP;
+}
+
+static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
+				      u8 *perm_addr)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (!perm_addr)
+		return;
+
+	mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
+}
+
+static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
+				     int priority, u8 prio_type,
+				     u8 pgid, u8 bw_pct, u8 up_map)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if (priority >= CEE_DCBX_MAX_PRIO) {
+		netdev_err(netdev,
+			   "%s, priority is out of range\n", __func__);
+		return;
+	}
+
+	if (pgid >= CEE_DCBX_MAX_PGS) {
+		netdev_err(netdev,
+			   "%s, priority group is out of range\n", __func__);
+		return;
+	}
+
+	cee_cfg->prio_to_pg_map[priority] = pgid;
+}
+
+static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
+				      int pgid, u8 bw_pct)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if (pgid >= CEE_DCBX_MAX_PGS) {
+		netdev_err(netdev,
+			   "%s, priority group is out of range\n", __func__);
+		return;
+	}
+
+	cee_cfg->pg_bw_pct[pgid] = bw_pct;
+}
+
+static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
+				     int priority, u8 *prio_type,
+				     u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (priority >= CEE_DCBX_MAX_PRIO) {
+		netdev_err(netdev,
+			   "%s, priority is out of range\n", __func__);
+		return;
+	}
+
+	*prio_type = 0;
+	*bw_pct = 0;
+	*up_map = 0;
+
+	if (mlx5_query_port_prio_tc(mdev, priority, pgid))
+		*pgid = 0;
+}
+
+static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+				      int pgid, u8 *bw_pct)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (pgid >= CEE_DCBX_MAX_PGS) {
+		netdev_err(netdev,
+			   "%s, priority group is out of range\n", __func__);
+		return;
+	}
+
+	if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct))
+		*bw_pct = 0;
+}
+
+static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
+				  int priority, u8 setting)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if (priority >= CEE_DCBX_MAX_PRIO) {
+		netdev_err(netdev,
+			   "%s, priority is out of range\n", __func__);
+		return;
+	}
+
+	if (setting > 1)
+		return;
+
+	cee_cfg->pfc_setting[priority] = setting;
+}
+
+static int
+mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
+			     int priority, u8 *setting)
+{
+	struct ieee_pfc pfc;
+	int err;
+
+	err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
+
+	if (err)
+		*setting = 0;
+	else
+		*setting = (pfc.pfc_en >> priority) & 0x01;
+
+	return err;
+}
+
+static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
+				  int priority, u8 *setting)
+{
+	if (priority >= CEE_DCBX_MAX_PRIO) {
+		netdev_err(netdev,
+			   "%s, priority is out of range\n", __func__);
+		return;
+	}
+
+	if (!setting)
+		return;
+
+	mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
+}
+
+static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
+			     int capid, u8 *cap)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 rval = 0;
+
+	switch (capid) {
+	case DCB_CAP_ATTR_PG:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_PFC:
+		*cap = true;
+		break;
+	case DCB_CAP_ATTR_UP2TC:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_PG_TCS:
+		*cap = 1 << mlx5_max_tc(mdev);
+		break;
+	case DCB_CAP_ATTR_PFC_TCS:
+		*cap = 1 << mlx5_max_tc(mdev);
+		break;
+	case DCB_CAP_ATTR_GSP:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_BCN:
+		*cap = false;
+		break;
+	case DCB_CAP_ATTR_DCBX:
+		*cap = (DCB_CAP_DCBX_LLD_MANAGED |
+			DCB_CAP_DCBX_VER_CEE |
+			DCB_CAP_DCBX_STATIC);
+		break;
+	default:
+		*cap = 0;
+		rval = 1;
+		break;
+	}
+
+	return rval;
+}
+
+static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
+				 int tcs_id, u8 *num)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	switch (tcs_id) {
+	case DCB_NUMTCS_ATTR_PG:
+	case DCB_NUMTCS_ATTR_PFC:
+		*num = mlx5_max_tc(mdev) + 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
+{
+	struct ieee_pfc pfc;
+
+	if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
+		return MLX5E_CEE_STATE_DOWN;
+
+	return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
+}
+
+static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
+
+	if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
+		return;
+
+	cee_cfg->pfc_enable = state;
+}
+
+const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
+	.ieee_getets	= mlx5e_dcbnl_ieee_getets,
+	.ieee_setets	= mlx5e_dcbnl_ieee_setets,
+	.ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
+	.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
+	.ieee_getpfc	= mlx5e_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= mlx5e_dcbnl_ieee_setpfc,
+	.getdcbx	= mlx5e_dcbnl_getdcbx,
+	.setdcbx	= mlx5e_dcbnl_setdcbx,
+
+/* CEE interfaces */
+	.setall         = mlx5e_dcbnl_setall,
+	.getstate       = mlx5e_dcbnl_getstate,
+	.getpermhwaddr  = mlx5e_dcbnl_getpermhwaddr,
+
+	.setpgtccfgtx   = mlx5e_dcbnl_setpgtccfgtx,
+	.setpgbwgcfgtx  = mlx5e_dcbnl_setpgbwgcfgtx,
+	.getpgtccfgtx   = mlx5e_dcbnl_getpgtccfgtx,
+	.getpgbwgcfgtx  = mlx5e_dcbnl_getpgbwgcfgtx,
+
+	.setpfccfg      = mlx5e_dcbnl_setpfccfg,
+	.getpfccfg      = mlx5e_dcbnl_getpfccfg,
+	.getcap         = mlx5e_dcbnl_getcap,
+	.getnumtcs      = mlx5e_dcbnl_getnumtcs,
+	.getpfcstate    = mlx5e_dcbnl_getpfcstate,
+	.setpfcstate    = mlx5e_dcbnl_setpfcstate,
+};
+
+static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
+					enum mlx5_dcbx_oper_mode *mode)
+{
+	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
+
+	*mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
+
+	if (!mlx5_query_port_dcbx_param(priv->mdev, out))
+		*mode = MLX5_GET(dcbx_param, out, version_oper);
+
+	/* From the driver's point of view, we only care whether the mode
+	 * is host (HOST) or non-host (AUTO).
+	 */
+	if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
+		*mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
+}
+
+static void mlx5e_ets_init(struct mlx5e_priv *priv)
+{
+	int i;
+	struct ieee_ets ets;
+
+	memset(&ets, 0, sizeof(ets));
+	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
+	for (i = 0; i < ets.ets_cap; i++) {
+		ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
+		ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+		ets.prio_tc[i] = i;
+	}
+
+	memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa));
+
+	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
+	ets.prio_tc[0] = 1;
+	ets.prio_tc[1] = 0;
+
+	mlx5e_dcbnl_ieee_setets_core(priv, &ets);
+}
+
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+{
+	struct mlx5e_dcbx *dcbx = &priv->dcbx;
+
+	if (MLX5_CAP_GEN(priv->mdev, dcbx))
+		mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
+
+	mlx5e_ets_init(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/en.h b/drivers/net/ethernet/mellanox/mlx5/en/en.h
new file mode 100644
index 0000000..5f5f209
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/en.h
@@ -0,0 +1,932 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_H__
+#define __MLX5_EN_H__
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/timecounter.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include "lib/driver.h"
+#include "lib/qp.h"
+#include "lib/cq.h"
+#include "lib/port.h"
+#include "lib/vport.h"
+#include "lib/transobj.h"
+#include <linux/rhashtable.h>
+#include <net/switchdev.h>
+#include "en/wq.h"
+#include "mlx5.h"
+#include "en/stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
+#define MLX5E_MAX_NUM_TC	8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE                0x6
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE                0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE                0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE                0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW            0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW            0x3
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW            0x6
+
+#define MLX5_RX_HEADROOM NET_SKB_PAD
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE		6  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS	8  /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ			18
+#define MLX5_MPWRQ_WQE_PAGE_ORDER  (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+				    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE		BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE		(MLX5_MPWRQ_NUM_STRIDES >> \
+						 MLX5_MPWRQ_WQE_PAGE_ORDER)
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(wqes)		\
+	((wqes) * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) - 1 <= U16_MAX)
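+/* A worked example, assuming 4KiB pages: MLX5_MPWRQ_LOG_WQE_SZ (18) -
+ * PAGE_SHIFT (12) gives a page order of 6, i.e. 64 pages per WQE;
+ * ALIGN(64, 8) = 64, so each WQE consumes 64 MTT entries, packed two
+ * per octoword by MLX5_MTT_OCTW.
+ */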
+
+#define MLX5_UMR_ALIGN				(2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD	(128)
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ                 (64 * 1024)
+#define MLX5E_DEFAULT_LRO_TIMEOUT                       32
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE                      4
+
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC      0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS      0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES                0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW            0x2
+
+#define MLX5E_LOG_INDIR_RQT_SIZE       0x7
+#define MLX5E_INDIR_RQT_SIZE           BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MAX_NUM_CHANNELS         (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_SQS              (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
+#define MLX5E_TX_CQ_POLL_BUDGET        128
+#define MLX5E_UPDATE_STATS_INTERVAL    200 /* msecs */
+#define MLX5E_SQ_BF_BUDGET             16
+
+#define MLX5E_ICOSQ_MAX_WQEBBS \
+	(DIV_ROUND_UP(sizeof(struct mlx5e_umr_wqe), MLX5_SEND_WQE_BB))
+
+#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+#define MLX5E_XDP_IHS_DS_COUNT \
+	DIV_ROUND_UP(MLX5E_XDP_MIN_INLINE - 2, MLX5_SEND_WQE_DS)
+#define MLX5E_XDP_TX_DS_COUNT \
+	(MLX5E_XDP_IHS_DS_COUNT + \
+	 (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */)
+#define MLX5E_XDP_TX_WQEBBS \
+	DIV_ROUND_UP(MLX5E_XDP_TX_DS_COUNT, MLX5_SEND_WQEBB_NUM_DS)
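+/* A sketch of the arithmetic, assuming 16-byte data segments
+ * (MLX5_SEND_WQE_DS) and 64-byte basic blocks (4 DS per WQEBB):
+ * MLX5E_XDP_MIN_INLINE is ETH_HLEN + VLAN_HLEN = 18, so the inline
+ * header takes DIV_ROUND_UP(16, 16) = 1 DS; adding 2 DS of ctrl/eth
+ * segments and 1 scatter DS, an XDP TX WQE takes 4 DS = 1 WQEBB.
+ */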
+
+#define MLX5E_NUM_MAIN_GROUPS 9
+
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+			     wq_size / 2);
+	default:
+		return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+			     wq_size / 2);
+	}
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+	default:
+		return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+	}
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+	switch (wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+	default:
+		return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+	}
+}
+
+struct mlx5e_tx_wqe {
+	struct mlx5_wqe_ctrl_seg ctrl;
+	struct mlx5_wqe_eth_seg  eth;
+};
+
+struct mlx5e_rx_wqe {
+	struct mlx5_wqe_srq_next_seg  next;
+	struct mlx5_wqe_data_seg      data;
+};
+
+struct mlx5e_umr_wqe {
+	struct mlx5_wqe_ctrl_seg       ctrl;
+	struct mlx5_wqe_umr_ctrl_seg   uctrl;
+	struct mlx5_mkey_seg           mkc;
+	struct mlx5_wqe_data_seg       data;
+};
+
+extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
+
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+	"rx_cqe_moder",
+	"rx_cqe_compress",
+};
+
+enum mlx5e_priv_flag {
+	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
+	MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
+};
+
+#define MLX5E_SET_PFLAG(priv, pflag, enable)			\
+	do {							\
+		if (enable)					\
+			(priv)->params.pflags |= (pflag);	\
+		else						\
+			(priv)->params.pflags &= ~(pflag);	\
+	} while (0)
+
+#define MLX5E_GET_PFLAG(priv, pflag) (!!((priv)->params.pflags & (pflag)))
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
+#endif
+
+struct mlx5e_cq_moder {
+	u16 usec;
+	u16 pkts;
+};
+
+struct mlx5e_params {
+	u8  log_sq_size;
+	u8  rq_wq_type;
+	u8  mpwqe_log_stride_sz;
+	u8  mpwqe_log_num_strides;
+	u8  log_rq_size;
+	u16 num_channels;
+	u8  num_tc;
+	u8  rx_cq_period_mode;
+	bool rx_cqe_compress_def;
+	struct mlx5e_cq_moder rx_cq_moderation;
+	struct mlx5e_cq_moder tx_cq_moderation;
+	u16 min_rx_wqes;
+	bool lro_en;
+	u32 lro_wqe_sz;
+	u16 tx_max_inline;
+	u8  tx_min_inline_mode;
+	u8  rss_hfunc;
+	u8  toeplitz_hash_key[40];
+	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+	bool vlan_strip_disable;
+	bool rx_am_enabled;
+	u32 lro_timeout;
+	u32 pflags;
+};
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+struct mlx5e_cee_config {
+	/* bw pct for priority group */
+	u8                         pg_bw_pct[CEE_DCBX_MAX_PGS];
+	u8                         prio_to_pg_map[CEE_DCBX_MAX_PRIO];
+	bool                       pfc_setting[CEE_DCBX_MAX_PRIO];
+	bool                       pfc_enable;
+};
+
+enum {
+	MLX5_DCB_CHG_RESET,
+	MLX5_DCB_NO_CHG,
+	MLX5_DCB_CHG_NO_RESET,
+};
+
+struct mlx5e_dcbx {
+	enum mlx5_dcbx_oper_mode   mode;
+	struct mlx5e_cee_config    cee_cfg; /* pending configuration */
+
+	/* The only setting that cannot be read from FW */
+	u8                         tc_tsa[IEEE_8021QAZ_MAX_TCS];
+};
+#endif
+
+struct mlx5e_tstamp {
+	rwlock_t                   lock;
+	struct cyclecounter        cycles;
+	struct timecounter         clock;
+	struct hwtstamp_config     hwtstamp_config;
+	u32                        nominal_c_mult;
+	unsigned long              overflow_period;
+	struct delayed_work        overflow_work;
+	struct mlx5_core_dev      *mdev;
+	struct ptp_clock          *ptp;
+	struct ptp_clock_info      ptp_info;
+};
+
+enum {
+	MLX5E_RQ_STATE_ENABLED,
+	MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+	MLX5E_RQ_STATE_AM,
+};
+
+struct mlx5e_cq {
+	/* data path - accessed per cqe */
+	struct mlx5_cqwq           wq;
+
+	/* data path - accessed per napi poll */
+	u16                        event_ctr;
+	struct napi_struct        *napi;
+	struct mlx5_core_cq        mcq;
+	struct mlx5e_channel      *channel;
+	struct mlx5e_priv         *priv;
+
+	/* cqe decompression */
+	struct mlx5_cqe64          title;
+	struct mlx5_mini_cqe8      mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+	u8                         mini_arr_idx;
+	u16                        decmprs_left;
+	u16                        decmprs_wqe_counter;
+
+	/* control */
+	struct mlx5_frag_wq_ctrl   wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+				       struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+				  u16 ix);
+
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
+struct mlx5e_dma_info {
+	struct page	*page;
+	dma_addr_t	addr;
+};
+
+struct mlx5e_rx_am_stats {
+	int ppms; /* packets per msec */
+	int epms; /* events per msec */
+};
+
+struct mlx5e_rx_am_sample {
+	ktime_t		time;
+	unsigned int	pkt_ctr;
+	u16		event_ctr;
+};
+
+struct mlx5e_rx_am { /* Adaptive Moderation */
+	u8					state;
+	struct mlx5e_rx_am_stats		prev_stats;
+	struct mlx5e_rx_am_sample		start_sample;
+	struct work_struct			work;
+	u8					profile_ix;
+	u8					mode;
+	u8					tune_state;
+	u8					steps_right;
+	u8					steps_left;
+	u8					tired;
+};
+
+/* a single cache unit is capable of serving one napi call (for
+ * non-striding rq) or one MPWQE (for striding rq).
+ */
+#define MLX5E_CACHE_UNIT	(MLX5_MPWRQ_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
+				 MLX5_MPWRQ_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
+#define MLX5E_CACHE_SIZE	(2 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
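+/* E.g., with 4KiB pages and the default NAPI_POLL_WEIGHT of 64,
+ * MLX5_MPWRQ_PAGES_PER_WQE is also 64, so the cache holds
+ * 2 * roundup_pow_of_two(64) = 128 pages.
+ */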
+struct mlx5e_page_cache {
+	u32 head;
+	u32 tail;
+	struct mlx5e_dma_info page_cache[MLX5E_CACHE_SIZE];
+};
+
+struct mlx5e_rq {
+	/* data path */
+	struct mlx5_wq_ll      wq;
+
+	union {
+		struct mlx5e_dma_info *dma_info;
+		struct {
+			struct mlx5e_mpw_info *info;
+			void                  *mtt_no_align;
+		} mpwqe;
+	};
+	struct {
+		u8             page_order;
+		u32            wqe_sz;    /* wqe data buffer size */
+		u8             map_dir;   /* dma map direction */
+	} buff;
+	__be32                 mkey_be;
+
+	struct device         *pdev;
+	struct net_device     *netdev;
+	struct mlx5e_tstamp   *tstamp;
+	struct mlx5e_rq_stats  stats;
+	struct mlx5e_cq        cq;
+	struct mlx5e_page_cache page_cache;
+
+	mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+	mlx5e_fp_alloc_wqe     alloc_wqe;
+	mlx5e_fp_dealloc_wqe   dealloc_wqe;
+
+	unsigned long          state;
+	int                    ix;
+
+	struct mlx5e_rx_am     am; /* Adaptive Moderation */
+	struct bpf_prog       *xdp_prog;
+
+	/* control */
+	struct mlx5_wq_ctrl    wq_ctrl;
+	u8                     wq_type;
+	u32                    mpwqe_stride_sz;
+	u32                    mpwqe_num_strides;
+	u32                    rqn;
+	struct mlx5e_channel  *channel;
+	struct mlx5e_priv     *priv;
+	struct mlx5_core_mkey  umr_mkey;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_umr_dma_info {
+	__be64                *mtt;
+	dma_addr_t             mtt_addr;
+	struct mlx5e_dma_info  dma_info[MLX5_MPWRQ_PAGES_PER_WQE];
+	struct mlx5e_umr_wqe   wqe;
+};
+
+struct mlx5e_mpw_info {
+	struct mlx5e_umr_dma_info umr;
+	u16 consumed_strides;
+	u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+};
+
+struct mlx5e_tx_wqe_info {
+	u32 num_bytes;
+	u8  num_wqebbs;
+	u8  num_dma;
+};
+
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
+struct mlx5e_sq_dma {
+	dma_addr_t              addr;
+	u32                     size;
+	enum mlx5e_dma_map_type type;
+};
+
+enum {
+	MLX5E_SQ_STATE_ENABLED,
+	MLX5E_SQ_STATE_BF_ENABLE,
+};
+
+struct mlx5e_sq_wqe_info {
+	u8  opcode;
+	u8  num_wqebbs;
+};
+
+enum mlx5e_sq_type {
+	MLX5E_SQ_TXQ,
+	MLX5E_SQ_ICO,
+	MLX5E_SQ_XDP
+};
+
+struct mlx5e_sq {
+	/* data path */
+
+	/* dirtied @completion */
+	u16                        cc;
+	u32                        dma_fifo_cc;
+
+	/* dirtied @xmit */
+	u16                        pc ____cacheline_aligned_in_smp;
+	u32                        dma_fifo_pc;
+	u16                        bf_offset;
+	u16                        prev_cc;
+	u8                         bf_budget;
+	struct mlx5e_sq_stats      stats;
+
+	struct mlx5e_cq            cq;
+
+	/* pointers to per tx element info: write@xmit, read@completion */
+	union {
+		struct {
+			struct sk_buff           **skb;
+			struct mlx5e_sq_dma       *dma_fifo;
+			struct mlx5e_tx_wqe_info  *wqe_info;
+		} txq;
+		struct mlx5e_sq_wqe_info *ico_wqe;
+		struct {
+			struct mlx5e_sq_wqe_info  *wqe_info;
+			struct mlx5e_dma_info     *di;
+			bool                       doorbell;
+		} xdp;
+	} db;
+
+	/* read only */
+	struct mlx5_wq_cyc         wq;
+	u32                        dma_fifo_mask;
+	struct netdev_queue       *txq;
+	u32                        sqn;
+	u16                        bf_buf_size;
+	u16                        max_inline;
+	u8                         min_inline_mode;
+	u16                        edge;
+	struct device             *pdev;
+	struct mlx5e_tstamp       *tstamp;
+	__be32                     mkey_be;
+	unsigned long              state;
+
+	/* control path */
+	struct mlx5_wq_ctrl        wq_ctrl;
+	struct mlx5_sq_bfreg	   bfreg;
+	struct mlx5e_channel      *channel;
+	int                        tc;
+	u32                        rate_limit;
+	u8                         type;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
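+	/* pc and cc are free-running producer/consumer counters;
+	 * (cc - pc) masked with sz_m1 gives the free WQEBB count modulo
+	 * the ring size, which is ambiguous only when the ring is
+	 * completely empty, hence the extra cc == pc test.
+	 */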
+	return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+		(sq->cc  == sq->pc));
+}
+
+enum channel_flags {
+	MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+	/* data path */
+	struct mlx5e_rq            rq;
+	struct mlx5e_sq            xdp_sq;
+	struct mlx5e_sq            sq[MLX5E_MAX_NUM_TC];
+	struct mlx5e_sq            icosq;   /* internal control operations */
+	bool                       xdp;
+	struct napi_struct         napi;
+	struct device             *pdev;
+	struct net_device         *netdev;
+	__be32                     mkey_be;
+	u8                         num_tc;
+	unsigned long              flags;
+
+	/* control */
+	struct mlx5e_priv         *priv;
+	int                        ix;
+	int                        cpu;
+};
+
+enum mlx5e_traffic_types {
+	MLX5E_TT_IPV4_TCP,
+	MLX5E_TT_IPV6_TCP,
+	MLX5E_TT_IPV4_UDP,
+	MLX5E_TT_IPV6_UDP,
+	MLX5E_TT_IPV4_IPSEC_AH,
+	MLX5E_TT_IPV6_IPSEC_AH,
+	MLX5E_TT_IPV4_IPSEC_ESP,
+	MLX5E_TT_IPV6_IPSEC_ESP,
+	MLX5E_TT_IPV4,
+	MLX5E_TT_IPV6,
+	MLX5E_TT_ANY,
+	MLX5E_NUM_TT,
+	MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
+};
+
+enum {
+	MLX5E_STATE_ASYNC_EVENTS_ENABLED,
+	MLX5E_STATE_OPENED,
+	MLX5E_STATE_DESTROYING,
+};
+
+struct mlx5e_vxlan_db {
+	spinlock_t			lock; /* protect vxlan table */
+	struct radix_tree_root		tree;
+};
+
+struct mlx5e_l2_rule {
+	u8  addr[ETH_ALEN + 2];
+	struct mlx5_flow_handle *rule;
+};
+
+struct mlx5e_flow_table {
+	int num_groups;
+	struct mlx5_flow_table *t;
+	struct mlx5_flow_group **g;
+};
+
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+	struct mlx5_flow_table		*t;
+
+	struct rhashtable_params        ht_params;
+	struct rhashtable               ht;
+};
+
+struct mlx5e_vlan_table {
+	struct mlx5e_flow_table		ft;
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	struct mlx5_flow_handle	*active_vlans_rule[VLAN_N_VID];
+	struct mlx5_flow_handle	*untagged_rule;
+	struct mlx5_flow_handle	*any_vlan_rule;
+	bool		filter_disabled;
+};
+
+struct mlx5e_l2_table {
+	struct mlx5e_flow_table    ft;
+	struct hlist_head          netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct hlist_head          netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+	struct mlx5e_l2_rule	   broadcast;
+	struct mlx5e_l2_rule	   allmulti;
+	struct mlx5e_l2_rule	   promisc;
+	bool                       broadcast_enabled;
+	bool                       allmulti_enabled;
+	bool                       promisc_enabled;
+};
+
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_handle	 *rules[MLX5E_NUM_TT];
+};
+
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_handle	 *default_rule;
+	struct hlist_head	 rules_hash[ARFS_HASH_SIZE];
+};
+
+enum  arfs_type {
+	ARFS_IPV4_TCP,
+	ARFS_IPV6_TCP,
+	ARFS_IPV4_UDP,
+	ARFS_IPV6_UDP,
+	ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+	/* Protect aRFS rules list */
+	spinlock_t                     arfs_lock;
+	struct list_head               rules;
+	int                            last_filter_id;
+	struct workqueue_struct        *wq;
+};
+
+/* NIC prio FTS */
+enum {
+	MLX5E_VLAN_FT_LEVEL = 0,
+	MLX5E_L2_FT_LEVEL,
+	MLX5E_TTC_FT_LEVEL,
+	MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_ethtool_table {
+	struct mlx5_flow_table *ft;
+	int                    num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+	struct mlx5e_ethtool_table      l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+	struct mlx5e_ethtool_table      l2_ft[ETHTOOL_NUM_L2_FTS];
+	struct list_head                rules;
+	int                             tot_num_rules;
+};
+
+struct mlx5e_flow_steering {
+	struct mlx5_flow_namespace      *ns;
+	struct mlx5e_ethtool_steering   ethtool;
+	struct mlx5e_tc_table           tc;
+	struct mlx5e_vlan_table         vlan;
+	struct mlx5e_l2_table           l2;
+	struct mlx5e_ttc_table          ttc;
+	struct mlx5e_arfs_tables        arfs;
+};
+
+struct mlx5e_rqt {
+	u32              rqtn;
+	bool		 enabled;
+};
+
+struct mlx5e_tir {
+	u32		  tirn;
+	struct mlx5e_rqt  rqt;
+	struct list_head  list;
+};
+
+enum {
+	MLX5E_TC_PRIO = 0,
+	MLX5E_NIC_PRIO
+};
+
+struct mlx5e_profile {
+	void	(*init)(struct mlx5_core_dev *mdev,
+			struct net_device *netdev,
+			const struct mlx5e_profile *profile, void *ppriv);
+	void	(*cleanup)(struct mlx5e_priv *priv);
+	int	(*init_rx)(struct mlx5e_priv *priv);
+	void	(*cleanup_rx)(struct mlx5e_priv *priv);
+	int	(*init_tx)(struct mlx5e_priv *priv);
+	void	(*cleanup_tx)(struct mlx5e_priv *priv);
+	void	(*enable)(struct mlx5e_priv *priv);
+	void	(*disable)(struct mlx5e_priv *priv);
+	void	(*update_stats)(struct mlx5e_priv *priv);
+	int	(*max_nch)(struct mlx5_core_dev *mdev);
+	int	max_tc;
+};
+
+struct mlx5e_priv {
+	/* priv data path fields - start */
+	struct mlx5e_sq            **txq_to_sq_map;
+	int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+	struct bpf_prog *xdp_prog;
+	/* priv data path fields - end */
+
+	unsigned long              state;
+	struct mutex               state_lock; /* Protects Interface state */
+	struct mlx5e_rq            drop_rq;
+
+	struct mlx5e_channel     **channel;
+	u32                        tisn[MLX5E_MAX_NUM_TC];
+	struct mlx5e_rqt           indir_rqt;
+	struct mlx5e_tir           indir_tir[MLX5E_NUM_INDIR_TIRS];
+	struct mlx5e_tir           direct_tir[MLX5E_MAX_NUM_CHANNELS];
+	u32                        tx_rates[MLX5E_MAX_NUM_SQS];
+
+	struct mlx5e_flow_steering fs;
+	struct mlx5e_vxlan_db      vxlan;
+
+	struct mlx5e_params        params;
+	struct workqueue_struct    *wq;
+	struct work_struct         update_carrier_work;
+	struct work_struct         set_rx_mode_work;
+	struct work_struct         tx_timeout_work;
+	struct delayed_work        update_stats_work;
+
+	struct mlx5_core_dev      *mdev;
+	struct net_device         *netdev;
+	struct mlx5e_stats         stats;
+	struct mlx5e_tstamp        tstamp;
+	u16 q_counter;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+	struct mlx5e_dcbx          dcbx;
+#endif
+
+	const struct mlx5e_profile *profile;
+	void                      *ppriv;
+};
+
+void mlx5e_build_ptys2ethtool_map(void);
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq);
+
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+			bool recycle);
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_rx_am(struct mlx5e_rq *rq);
+void mlx5e_rx_am_work(struct work_struct *work);
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_self_test_num(struct mlx5e_priv *priv);
+void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
+		     u64 *buf);
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+			   int location);
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+				struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+			       struct ethtool_rx_flow_spec *fs);
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+			      int location);
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
+			struct skb_shared_hwtstamps *hwts);
+void mlx5e_timestamp_init(struct mlx5e_priv *priv);
+void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
+int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
+int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+			  u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+			   u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+				   u32 *indirection_rqt, int len,
+				   int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
+				 u8 cq_period_mode);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+				      struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
+{
+	u16 ofst = sq->bf_offset;
+
+	/* ensure wqe is visible to device before updating doorbell record */
+	dma_wmb();
+
+	*sq->wq.db = cpu_to_be32(sq->pc);
+
+	/* ensure doorbell record is visible to device before ringing the
+	 * doorbell
+	 */
+	wmb();
+	if (bf_sz)
+		__iowrite64_copy(sq->bfreg.map + ofst, ctrl, bf_sz);
+	else
+		mlx5_write64((__be32 *)ctrl, sq->bfreg.map + ofst, NULL);
+	/* flush the write-combining mapped buffer */
+	wmb();
+
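+	/* alternate between the two halves of the BlueFlame buffer */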
+	sq->bf_offset ^= sq->bf_buf_size;
+}
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+	struct mlx5_core_cq *mcq;
+
+	mcq = &cq->mcq;
+	mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);
+}
+
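+/* Offset, in MTT entries, of a given WQE within the UMR mkey; entries
+ * are padded to a multiple of 8, matching MLX5E_REQUIRED_MTTS() and
+ * MLX5_MTT_OCTW() above.
+ */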
+static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+	return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+	return min_t(int, mdev->priv.eq_table.num_comp_vectors,
+		     MLX5E_MAX_NUM_CHANNELS);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
+int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
+void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+#endif
+
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+	return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+			u16 rxq_index, u32 flow_id);
+#endif
+
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+		     struct mlx5e_tir *tir, u32 *in, int inlen);
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+		       struct mlx5e_tir *tir);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_refresh_tirs_self_loopback(struct mlx5_core_dev *mdev,
+				     bool enable_uc_lb);
+
+struct mlx5_eswitch_rep;
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+			 struct mlx5_eswitch_rep *rep);
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+			    struct mlx5_eswitch_rep *rep);
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+			  struct mlx5_eswitch_rep *rep);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_update_hw_rep_counters(struct mlx5e_priv *priv);
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+int mlx5e_close(struct net_device *netdev);
+int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_stats_work(struct work_struct *work);
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+				       const struct mlx5e_profile *profile,
+				       void *ppriv);
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+void mlx5e_add_vxlan_port(struct net_device *netdev,
+			  struct udp_tunnel_info *ti);
+void mlx5e_del_vxlan_port(struct net_device *netdev,
+			  struct udp_tunnel_info *ti);
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp);
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id);
+
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv);
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv);
+#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/en/ethtool.c
new file mode 100644
index 0000000..0cae50d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/ethtool.c
@@ -0,0 +1,1608 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en/en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *drvinfo)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+		sizeof(drvinfo->version));
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%d.%d.%d",
+		 fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+	strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+struct ptys2ethtool_config {
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
+	u32 speed;
+};
+
+static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
+
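+/* Populate one ptys2ethtool_table entry: record the link speed and mark
+ * the given ethtool link-mode bits as both supported and advertised for
+ * that PTYS protocol.
+ */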
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...)               \
+	({                                                              \
+		struct ptys2ethtool_config *cfg;                        \
+		const unsigned int modes[] = { __VA_ARGS__ };           \
+		unsigned int i;                                         \
+		cfg = &ptys2ethtool_table[reg_];                        \
+		cfg->speed = speed_;                                    \
+		bitmap_zero(cfg->supported,                             \
+			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
+		bitmap_zero(cfg->advertised,                            \
+			    __ETHTOOL_LINK_MODE_MASK_NBITS);            \
+		for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) {             \
+			__set_bit(modes[i], cfg->supported);            \
+			__set_bit(modes[i], cfg->advertised);           \
+		}                                                       \
+	})
+
+void mlx5e_build_ptys2ethtool_map(void)
+{
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
+				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
+				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
+				       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
+				       ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
+				       ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
+				       ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
+				       ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
+				       ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
+				       ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
+				       ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
+				       ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
+				       ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
+				       ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
+				       ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
+				       ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
+				       ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
+				       ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
+				       ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
+	MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+				       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+}
+
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 pfc_en_tx;
+	u8 pfc_en_rx;
+	int err;
+
+	err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+	return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 rx_pause;
+	u32 tx_pause;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+	return err ? false : rx_pause | tx_pause;
+}
+
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+	(NUM_RQ_STATS * priv->params.num_channels * \
+	 test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+	(NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+	 test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+	((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
+	  NUM_PPORT_PER_PRIO_PFC_COUNTERS)
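+/* One set of per-priority PFC counters is exposed per PFC-enabled
+ * priority, plus one "global" set when plain pause is active.
+ */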
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (sset) {
+	case ETH_SS_STATS:
+		return NUM_SW_COUNTERS +
+		       MLX5E_NUM_Q_CNTRS(priv) +
+		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+		       NUM_PCIE_COUNTERS +
+		       MLX5E_NUM_RQ_STATS(priv) +
+		       MLX5E_NUM_SQ_STATS(priv) +
+		       MLX5E_NUM_PFC_COUNTERS(priv) +
+		       ARRAY_SIZE(mlx5e_pme_status_desc) +
+		       ARRAY_SIZE(mlx5e_pme_error_desc);
+
+	case ETH_SS_PRIV_FLAGS:
+		return ARRAY_SIZE(mlx5e_priv_flags);
+	case ETH_SS_TEST:
+		return mlx5e_self_test_num(priv);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+	int i, j, tc, prio, idx = 0;
+	unsigned long pfc_combined;
+
+	/* SW counters */
+	for (i = 0; i < NUM_SW_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+
+	/* Q counters */
+	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+
+	/* VPORT counters */
+	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       vport_stats_desc[i].format);
+
+	/* PPORT counters */
+	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_802_3_stats_desc[i].format);
+
+	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_2863_stats_desc[i].format);
+
+	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pport_2819_stats_desc[i].format);
+
+	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pcie_perf_stats_desc[i].format);
+
+	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN,
+		       pcie_tas_stats_desc[i].format);
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_traffic_stats_desc[i].format, prio);
+	}
+
+	pfc_combined = mlx5e_query_pfc_combined(priv);
+	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			char pfc_string[ETH_GSTRING_LEN];
+
+			snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+		}
+	}
+
+	if (mlx5e_query_global_pause_combined(priv)) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				pport_per_prio_pfc_stats_desc[i].format, "global");
+		}
+	}
+
+	/* port module event counters */
+	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
+		strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return;
+
+	/* per channel counters */
+	for (i = 0; i < priv->params.num_channels; i++)
+		for (j = 0; j < NUM_RQ_STATS; j++)
+			sprintf(data + (idx++) * ETH_GSTRING_LEN,
+				rq_stats_desc[j].format, i);
+
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (i = 0; i < priv->params.num_channels; i++)
+			for (j = 0; j < NUM_SQ_STATS; j++)
+				sprintf(data + (idx++) * ETH_GSTRING_LEN,
+					sq_stats_desc[j].format,
+					priv->channeltc_to_txq_map[i][tc]);
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+			      uint32_t stringset, uint8_t *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_PRIV_FLAGS:
+		for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
+			strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
+		break;
+
+	case ETH_SS_TEST:
+		for (i = 0; i < mlx5e_self_test_num(priv); i++)
+			strcpy(data + i * ETH_GSTRING_LEN,
+			       mlx5e_self_tests[i]);
+		break;
+
+	case ETH_SS_STATS:
+		mlx5e_fill_stats_strings(priv, data);
+		break;
+	}
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_priv *mlx5_priv;
+	int i, j, tc, prio, idx = 0;
+	unsigned long pfc_combined;
+
+	if (!data)
+		return;
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_update_stats(priv);
+	mutex_unlock(&priv->state_lock);
+
+	for (i = 0; i < NUM_SW_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+						   sw_stats_desc, i);
+
+	for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+		data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+						   q_stats_desc, i);
+
+	for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+						  vport_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+						  pport_802_3_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+						  pport_2863_stats_desc, i);
+
+	for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+						  pport_2819_stats_desc, i);
+
+	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+						  pcie_perf_stats_desc, i);
+
+	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
+		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
+						  pcie_tas_stats_desc, i);
+
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+						 pport_per_prio_traffic_stats_desc, i);
+	}
+
+	pfc_combined = mlx5e_query_pfc_combined(priv);
+	for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+							  pport_per_prio_pfc_stats_desc, i);
+		}
+	}
+
+	if (mlx5e_query_global_pause_combined(priv)) {
+		for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+							  pport_per_prio_pfc_stats_desc, i);
+		}
+	}
+
+	/* port module event counters */
+	mlx5_priv = &priv->mdev->priv;
+	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
+		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+						   mlx5e_pme_status_desc, i);
+
+	for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
+		data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+						   mlx5e_pme_error_desc, i);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return;
+
+	/* per channel counters */
+	for (i = 0; i < priv->params.num_channels; i++)
+		for (j = 0; j < NUM_RQ_STATS; j++)
+			data[idx++] =
+			       MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+						    rq_stats_desc, j);
+
+	for (tc = 0; tc < priv->params.num_tc; tc++)
+		for (i = 0; i < priv->params.num_channels; i++)
+			for (j = 0; j < NUM_SQ_STATS; j++)
+				data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+								   sq_stats_desc, j);
+}
+
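+/* For striding RQ, ethtool ring sizes are reported in packets rather
+ * than WQEs, since one multi-packet WQE holds many packets.  As a rough
+ * sketch with the defaults above (256KiB WQEs, 64B strides), one WQE
+ * fits 262144 / ALIGN(ETH_DATA_LEN, 64) = 170 MTU-sized packets.
+ */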
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_wqe)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_wqe;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+	return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+				    int num_packets)
+{
+	int packets_per_wqe;
+	int stride_size;
+	int num_strides;
+	int wqe_size;
+	int num_wqes;
+
+	if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		return num_packets;
+
+	stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+	num_strides = 1 << priv->params.mpwqe_log_num_strides;
+	wqe_size = stride_size * num_strides;
+
+	num_packets = (1 << order_base_2(num_packets));
+
+	packets_per_wqe = wqe_size /
+			  ALIGN(ETH_DATA_LEN, stride_size);
+	num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+	return 1 << (order_base_2(num_wqes));
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *param)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int rq_wq_type = priv->params.rq_wq_type;
+
+	param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+							 1 << mlx5_max_log_rq_size(rq_wq_type));
+	param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+	param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+						     1 << priv->params.log_rq_size);
+	param->tx_pending     = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *param)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	bool was_opened;
+	int rq_wq_type = priv->params.rq_wq_type;
+	u32 rx_pending_wqes;
+	u32 min_rq_size;
+	u32 max_rq_size;
+	u16 min_rx_wqes;
+	u8 log_rq_size;
+	u8 log_sq_size;
+	u32 num_mtts;
+	int err = 0;
+
+	if (param->rx_jumbo_pending) {
+		netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (param->rx_mini_pending) {
+		netdev_info(dev, "%s: rx_mini_pending not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+	min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_min_log_rq_size(rq_wq_type));
+	max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+					       1 << mlx5_max_log_rq_size(rq_wq_type));
+	rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+						   param->rx_pending);
+
+	if (param->rx_pending < min_rq_size) {
+		netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+			    __func__, param->rx_pending,
+			    min_rq_size);
+		return -EINVAL;
+	}
+	if (param->rx_pending > max_rq_size) {
+		netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+			    __func__, param->rx_pending,
+			    max_rq_size);
+		return -EINVAL;
+	}
+
+	num_mtts = MLX5E_REQUIRED_MTTS(rx_pending_wqes);
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+		netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+			    __func__, param->rx_pending);
+		return -EINVAL;
+	}
+
+	if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+		netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+			    __func__, param->tx_pending,
+			    1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+		return -EINVAL;
+	}
+	if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+		netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+			    __func__, param->tx_pending,
+			    1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+		return -EINVAL;
+	}
+
+	log_rq_size = order_base_2(rx_pending_wqes);
+	log_sq_size = order_base_2(param->tx_pending);
+	min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
+
+	if (log_rq_size == priv->params.log_rq_size &&
+	    log_sq_size == priv->params.log_sq_size &&
+	    min_rx_wqes == priv->params.min_rx_wqes)
+		return 0;
+
+	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	priv->params.log_rq_size = log_rq_size;
+	priv->params.log_sq_size = log_sq_size;
+	priv->params.min_rx_wqes = min_rx_wqes;
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+			       struct ethtool_channels *ch)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	ch->max_combined   = mlx5e_get_max_num_channels(priv->mdev);
+	ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+			      struct ethtool_channels *ch)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int ncv = mlx5e_get_max_num_channels(priv->mdev);
+	unsigned int count = ch->combined_count;
+	bool arfs_enabled;
+	bool was_opened;
+	int err = 0;
+
+	if (!count) {
+		netdev_info(dev, "%s: combined_count=0 not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (ch->rx_count || ch->tx_count) {
+		netdev_info(dev, "%s: separate rx/tx count not supported\n",
+			    __func__);
+		return -EINVAL;
+	}
+	if (count > ncv) {
+		netdev_info(dev, "%s: count (%d) > max (%d)\n",
+			    __func__, count, ncv);
+		return -EINVAL;
+	}
+
+	if (priv->params.num_channels == count)
+		return 0;
+
+	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(dev);
+
+	arfs_enabled = dev->features & NETIF_F_NTUPLE;
+	if (arfs_enabled)
+		mlx5e_arfs_disable(priv);
+
+	priv->params.num_channels = count;
+	mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
+				      MLX5E_INDIR_RQT_SIZE, count);
+
+	if (was_opened)
+		err = mlx5e_open_locked(dev);
+	if (err)
+		goto out;
+
+	if (arfs_enabled) {
+		err = mlx5e_arfs_enable(priv);
+		if (err)
+			netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+				   __func__, err);
+	}
+
+out:
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+		return -ENOTSUPP;
+
+	coal->rx_coalesce_usecs       = priv->params.rx_cq_moderation.usec;
+	coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
+	coal->tx_coalesce_usecs       = priv->params.tx_cq_moderation.usec;
+	coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
+	coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
+
+	return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *coal)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_channel *c;
+	bool restart =
+		!!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
+	bool was_opened;
+	int err = 0;
+	int tc;
+	int i;
+
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		return -ENOTSUPP;
+
+	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened && restart) {
+		mlx5e_close_locked(netdev);
+		priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+	}
+
+	priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+	priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+	priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+	priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+
+	if (!was_opened || restart)
+		goto out;
+
+	for (i = 0; i < priv->params.num_channels; ++i) {
+		c = priv->channel[i];
+
+		for (tc = 0; tc < c->num_tc; tc++) {
+			mlx5_core_modify_cq_moderation(mdev,
+						&c->sq[tc].cq.mcq,
+						coal->tx_coalesce_usecs,
+						coal->tx_max_coalesced_frames);
+		}
+
+		mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+					       coal->rx_coalesce_usecs,
+					       coal->rx_max_coalesced_frames);
+	}
+
+out:
+	if (was_opened && restart)
+		err = mlx5e_open_locked(netdev);
+
+	mutex_unlock(&priv->state_lock);
+	return err;
+}
+
+static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+					u32 eth_proto_cap)
+{
+	unsigned long proto_cap = eth_proto_cap;
+	int proto;
+
+	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+		bitmap_or(supported_modes, supported_modes,
+			  ptys2ethtool_table[proto].supported,
+			  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+				    u32 eth_proto_cap)
+{
+	unsigned long proto_cap = eth_proto_cap;
+	int proto;
+
+	for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+		bitmap_or(advertising_modes, advertising_modes,
+			  ptys2ethtool_table[proto].advertised,
+			  __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
+					u32 eth_proto_cap)
+{
+	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+			   | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+			   | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
+	}
+
+	if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+			   | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+			   | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+			   | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+		ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
+	}
+}
+
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+	u32 max_speed = 0;
+	u32 proto_cap;
+	int err;
+	int i;
+
+	err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+	if (err)
+		return err;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+		if (proto_cap & MLX5E_PROT_MASK(i))
+			max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+	*speed = max_speed;
+	return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+			     u32 eth_proto_oper,
+			     struct ethtool_link_ksettings *link_ksettings)
+{
+	int i;
+	u32 speed = SPEED_UNKNOWN;
+	u8 duplex = DUPLEX_UNKNOWN;
+
+	if (!netif_carrier_ok(netdev))
+		goto out;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+			speed = ptys2ethtool_table[i].speed;
+			duplex = DUPLEX_FULL;
+			break;
+		}
+	}
+out:
+	link_ksettings->base.speed = speed;
+	link_ksettings->base.duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap,
+			  struct ethtool_link_ksettings *link_ksettings)
+{
+	unsigned long *supported = link_ksettings->link_modes.supported;
+
+	ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
+	ptys2ethtool_supported_link(supported, eth_proto_cap);
+	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
+	ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+			    u8 rx_pause,
+			    struct ethtool_link_ksettings *link_ksettings)
+{
+	unsigned long *advertising = link_ksettings->link_modes.advertising;
+
+	ptys2ethtool_adver_link(advertising, eth_proto_cap);
+	if (tx_pause)
+		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
+	if (tx_pause ^ rx_pause)
+		ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
+}
+
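+/* Best-effort mapping from the operational PTYS protocol mask to an
+ * ethtool connector type: SR/SGMII modes report fibre, CR modes report
+ * direct-attach copper, and KR/KX backplane modes report PORT_NONE.
+ */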
+static u8 get_connector_port(u32 eth_proto)
+{
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+			 | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+			 | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII)))
+		return PORT_FIBRE;
+
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+			 | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4)))
+		return PORT_DA;
+
+	if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+			 | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+			 | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+			 | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)))
+		return PORT_NONE;
+
+	return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp,
+			       struct ethtool_link_ksettings *link_ksettings)
+{
+	unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+
+	ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
+}
+
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+				    struct ethtool_link_ksettings *link_ksettings)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+	u32 eth_proto_cap;
+	u32 eth_proto_admin;
+	u32 eth_proto_lp;
+	u32 eth_proto_oper;
+	u8 an_disable_admin;
+	u8 an_status;
+	int err;
+
+	err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+	if (err) {
+		netdev_err(netdev, "%s: query port ptys failed: %d\n",
+			   __func__, err);
+		goto err_query_ptys;
+	}
+
+	eth_proto_cap    = MLX5_GET(ptys_reg, out, eth_proto_capability);
+	eth_proto_admin  = MLX5_GET(ptys_reg, out, eth_proto_admin);
+	eth_proto_oper   = MLX5_GET(ptys_reg, out, eth_proto_oper);
+	eth_proto_lp     = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+	an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+	an_status        = MLX5_GET(ptys_reg, out, an_status);
+
+	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+
+	get_supported(eth_proto_cap, link_ksettings);
+	get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+	get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
+
+	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+	link_ksettings->base.port = get_connector_port(eth_proto_oper);
+	get_lp_advertising(eth_proto_lp, link_ksettings);
+
+	if (an_status == MLX5_AN_COMPLETE)
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     lp_advertising, Autoneg);
+
+	link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
+							  AUTONEG_ENABLE;
+	ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+					     Autoneg);
+	if (!an_disable_admin)
+		ethtool_link_ksettings_add_link_mode(link_ksettings,
+						     advertising, Autoneg);
+
+err_query_ptys:
+	return err;
+}
+
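+/* Reverse translation: collect every PTYS protocol whose advertised
+ * link-mode bitmap intersects the ethtool bitmap requested by the user.
+ */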
+static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
+{
+	u32 i, ptys_modes = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (bitmap_intersects(ptys2ethtool_table[i].advertised,
+				      link_modes,
+				      __ETHTOOL_LINK_MODE_MASK_NBITS))
+			ptys_modes |= MLX5E_PROT_MASK(i);
+	}
+
+	return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+	u32 i, speed_links = 0;
+
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+		if (ptys2ethtool_table[i].speed == speed)
+			speed_links |= MLX5E_PROT_MASK(i);
+	}
+
+	return speed_links;
+}
+
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+				    const struct ethtool_link_ksettings *link_ksettings)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 eth_proto_cap, eth_proto_admin;
+	bool an_changes = false;
+	u8 an_disable_admin;
+	u8 an_disable_cap;
+	bool an_disable;
+	u32 link_modes;
+	u8 an_status;
+	u32 speed;
+	int err;
+
+	speed = link_ksettings->base.speed;
+
+	link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+		mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
+		mlx5e_ethtool2ptys_speed_link(speed);
+
+	err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	link_modes = link_modes & eth_proto_cap;
+	if (!link_modes) {
+		netdev_err(netdev, "%s: Not supported link mode(s) requested",
+			   __func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+	if (err) {
+		netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+			   __func__, err);
+		goto out;
+	}
+
+	mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
+				&an_disable_cap, &an_disable_admin);
+
+	an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+	an_changes = ((!an_disable && an_disable_admin) ||
+		      (an_disable && !an_disable_admin));
+
+	if (!an_changes && link_modes == eth_proto_admin)
+		goto out;
+
+	mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
+	mlx5_toggle_port_link(mdev);
+
+out:
+	return err;
+}
+
+static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return sizeof(priv->params.toeplitz_hash_key);
+}
+
+static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
+{
+	return MLX5E_INDIR_RQT_SIZE;
+}
+
+static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
+			  u8 *hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (indir)
+		memcpy(indir, priv->params.indirection_rqt,
+		       sizeof(priv->params.indirection_rqt));
+
+	if (key)
+		memcpy(key, priv->params.toeplitz_hash_key,
+		       sizeof(priv->params.toeplitz_hash_key));
+
+	if (hfunc)
+		*hfunc = priv->params.rss_hfunc;
+
+	return 0;
+}
+
+static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+	int i;
+
+	MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
+	mlx5e_build_tir_ctx_hash(tirc, priv);
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+}
+
+static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
+			  const u8 *key, const u8 hfunc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	void *in;
+
+	if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
+	    (hfunc != ETH_RSS_HASH_XOR) &&
+	    (hfunc != ETH_RSS_HASH_TOP))
+		return -EINVAL;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	mutex_lock(&priv->state_lock);
+
+	if (indir) {
+		u32 rqtn = priv->indir_rqt.rqtn;
+
+		memcpy(priv->params.indirection_rqt, indir,
+		       sizeof(priv->params.indirection_rqt));
+		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+	}
+
+	if (key)
+		memcpy(priv->params.toeplitz_hash_key, key,
+		       sizeof(priv->params.toeplitz_hash_key));
+
+	if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+		priv->params.rss_hfunc = hfunc;
+
+	mlx5e_modify_tirs_hash(priv, in, inlen);
+
+	mutex_unlock(&priv->state_lock);
+
+	kvfree(in);
+
+	return 0;
+}
+
+static int mlx5e_get_rxnfc(struct net_device *netdev,
+			   struct ethtool_rxnfc *info, u32 *rule_locs)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err = 0;
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = priv->params.num_channels;
+		break;
+	case ETHTOOL_GRXCLSRLCNT:
+		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+		break;
+	case ETHTOOL_GRXCLSRULE:
+		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+		break;
+	case ETHTOOL_GRXCLSRLALL:
+		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_get_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     void *data)
+{
+	const struct mlx5e_priv *priv = netdev_priv(dev);
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		*(u32 *)data = priv->params.tx_max_inline;
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static int mlx5e_set_tunable(struct net_device *dev,
+			     const struct ethtool_tunable *tuna,
+			     const void *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool was_opened;
+	u32 val;
+	int err = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_TX_COPYBREAK:
+		val = *(u32 *)data;
+		if (val > mlx5e_get_max_inline_cap(mdev)) {
+			err = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&priv->state_lock);
+
+		was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+		if (was_opened)
+			mlx5e_close_locked(dev);
+
+		priv->params.tx_max_inline = val;
+
+		if (was_opened)
+			err = mlx5e_open_locked(dev);
+
+		mutex_unlock(&priv->state_lock);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static void mlx5e_get_pauseparam(struct net_device *netdev,
+				 struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause,
+				    &pauseparam->tx_pause);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+}
+
+static int mlx5e_set_pauseparam(struct net_device *netdev,
+				struct ethtool_pauseparam *pauseparam)
+{
+	struct mlx5e_priv *priv    = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	if (pauseparam->autoneg)
+		return -EINVAL;
+
+	err = mlx5_set_port_pause(mdev,
+				  pauseparam->rx_pause ? 1 : 0,
+				  pauseparam->tx_pause ? 1 : 0);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n",
+			   __func__, err);
+	}
+
+	return err;
+}
+
+static int mlx5e_get_ts_info(struct net_device *dev,
+			     struct ethtool_ts_info *info)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int ret;
+
+	ret = ethtool_op_get_ts_info(dev, info);
+	if (ret)
+		return ret;
+
+	info->phc_index = priv->tstamp.ptp ?
+			  ptp_clock_index(priv->tstamp.ptp) : -1;
+
+	if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+		return 0;
+
+	info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
+				 SOF_TIMESTAMPING_RX_HARDWARE |
+				 SOF_TIMESTAMPING_RAW_HARDWARE;
+
+	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+			 BIT(HWTSTAMP_TX_ON);
+
+	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+			   BIT(HWTSTAMP_FILTER_ALL);
+
+	return 0;
+}
+
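+/* Build the ethtool WAKE_* mask from the device's wol_* general
+ * capability bits; a zero result means Wake-on-LAN is unsupported.
+ */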
+static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev)
+{
+	__u32 ret = 0;
+
+	if (MLX5_CAP_GEN(mdev, wol_g))
+		ret |= WAKE_MAGIC;
+
+	if (MLX5_CAP_GEN(mdev, wol_s))
+		ret |= WAKE_MAGICSECURE;
+
+	if (MLX5_CAP_GEN(mdev, wol_a))
+		ret |= WAKE_ARP;
+
+	if (MLX5_CAP_GEN(mdev, wol_b))
+		ret |= WAKE_BCAST;
+
+	if (MLX5_CAP_GEN(mdev, wol_m))
+		ret |= WAKE_MCAST;
+
+	if (MLX5_CAP_GEN(mdev, wol_u))
+		ret |= WAKE_UCAST;
+
+	if (MLX5_CAP_GEN(mdev, wol_p))
+		ret |= WAKE_PHY;
+
+	return ret;
+}
+
+static __u32 mlx5e_reformat_wol_mode_mlx5_to_linux(u8 mode)
+{
+	__u32 ret = 0;
+
+	if (mode & MLX5_WOL_MAGIC)
+		ret |= WAKE_MAGIC;
+
+	if (mode & MLX5_WOL_SECURED_MAGIC)
+		ret |= WAKE_MAGICSECURE;
+
+	if (mode & MLX5_WOL_ARP)
+		ret |= WAKE_ARP;
+
+	if (mode & MLX5_WOL_BROADCAST)
+		ret |= WAKE_BCAST;
+
+	if (mode & MLX5_WOL_MULTICAST)
+		ret |= WAKE_MCAST;
+
+	if (mode & MLX5_WOL_UNICAST)
+		ret |= WAKE_UCAST;
+
+	if (mode & MLX5_WOL_PHY_ACTIVITY)
+		ret |= WAKE_PHY;
+
+	return ret;
+}
+
+static u8 mlx5e_reformat_wol_mode_linux_to_mlx5(__u32 mode)
+{
+	u8 ret = 0;
+
+	if (mode & WAKE_MAGIC)
+		ret |= MLX5_WOL_MAGIC;
+
+	if (mode & WAKE_MAGICSECURE)
+		ret |= MLX5_WOL_SECURED_MAGIC;
+
+	if (mode & WAKE_ARP)
+		ret |= MLX5_WOL_ARP;
+
+	if (mode & WAKE_BCAST)
+		ret |= MLX5_WOL_BROADCAST;
+
+	if (mode & WAKE_MCAST)
+		ret |= MLX5_WOL_MULTICAST;
+
+	if (mode & WAKE_UCAST)
+		ret |= MLX5_WOL_UNICAST;
+
+	if (mode & WAKE_PHY)
+		ret |= MLX5_WOL_PHY_ACTIVITY;
+
+	return ret;
+}
+
+static void mlx5e_get_wol(struct net_device *netdev,
+			  struct ethtool_wolinfo *wol)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 mlx5_wol_mode;
+	int err;
+
+	memset(wol, 0, sizeof(*wol));
+
+	wol->supported = mlx5e_get_wol_supported(mdev);
+	if (!wol->supported)
+		return;
+
+	err = mlx5_query_port_wol(mdev, &mlx5_wol_mode);
+	if (err)
+		return;
+
+	wol->wolopts = mlx5e_reformat_wol_mode_mlx5_to_linux(mlx5_wol_mode);
+}
+
+static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	__u32 wol_supported = mlx5e_get_wol_supported(mdev);
+	u32 mlx5_wol_mode;
+
+	if (!wol_supported)
+		return -ENOTSUPP;
+
+	if (wol->wolopts & ~wol_supported)
+		return -EINVAL;
+
+	mlx5_wol_mode = mlx5e_reformat_wol_mode_linux_to_mlx5(wol->wolopts);
+
+	return mlx5_set_port_wol(mdev, mlx5_wol_mode);
+}
+
+static int mlx5e_set_phys_id(struct net_device *dev,
+			     enum ethtool_phys_id_state state)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 beacon_duration;
+
+	if (!MLX5_CAP_GEN(mdev, beacon_led))
+		return -EOPNOTSUPP;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		beacon_duration = MLX5_BEACON_DURATION_INF;
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		beacon_duration = MLX5_BEACON_DURATION_OFF;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return mlx5_set_port_beacon(mdev, beacon_duration);
+}
+
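+/* Read the first two bytes of the module EEPROM (the SFF-8024
+ * identifier and revision) to choose the matching SFF spec and the
+ * EEPROM length to expose to ethtool.
+ */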
+static int mlx5e_get_module_info(struct net_device *netdev,
+				 struct ethtool_modinfo *modinfo)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *dev = priv->mdev;
+	int size_read = 0;
+	u8 data[4];
+
+	size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+	if (size_read < 2)
+		return -EIO;
+
+	/* data[0] = identifier byte */
+	switch (data[0]) {
+	case MLX5_MODULE_ID_QSFP:
+		modinfo->type       = ETH_MODULE_SFF_8436;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		break;
+	case MLX5_MODULE_ID_QSFP_PLUS:
+	case MLX5_MODULE_ID_QSFP28:
+		/* data[1] = revision id */
+		if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+			modinfo->type       = ETH_MODULE_SFF_8636;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+		} else {
+			modinfo->type       = ETH_MODULE_SFF_8436;
+			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+		}
+		break;
+	case MLX5_MODULE_ID_SFP:
+		modinfo->type       = ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+		break;
+	default:
+		netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+			   __func__, data[0]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+				   struct ethtool_eeprom *ee,
+				   u8 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int offset = ee->offset;
+	int size_read;
+	int i = 0;
+
+	if (!ee->len)
+		return -EINVAL;
+
+	memset(data, 0, ee->len);
+
+	while (i < ee->len) {
+		size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+						     data + i);
+
+		if (!size_read)
+			/* Done reading */
+			return 0;
+
+		if (size_read < 0) {
+			netdev_err(priv->netdev, "%s: mlx5_query_module_eeprom failed:0x%x\n",
+				   __func__, size_read);
+			return 0;
+		}
+
+		i += size_read;
+		offset += size_read;
+	}
+
+	return 0;
+}
+
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool rx_mode_changed;
+	u8 rx_cq_period_mode;
+	int err = 0;
+	bool reset;
+
+	rx_cq_period_mode = enable ?
+		MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+		MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+	rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+
+	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+	    !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
+		return -ENOTSUPP;
+
+	if (!rx_mode_changed)
+		return 0;
+
+	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (reset)
+		mlx5e_close_locked(netdev);
+
+	mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+
+	if (reset)
+		err = mlx5e_open_locked(netdev);
+
+	return err;
+}
+
+static int set_pflag_rx_cqe_compress(struct net_device *netdev,
+				     bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err = 0;
+	bool reset;
+
+	if (!MLX5_CAP_GEN(mdev, cqe_compression))
+		return -ENOTSUPP;
+
+	if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+		netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
+		return -EINVAL;
+	}
+
+	reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+
+	if (reset)
+		mlx5e_close_locked(netdev);
+
+	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
+	priv->params.rx_cqe_compress_def = enable;
+
+	if (reset)
+		err = mlx5e_open_locked(netdev);
+	return err;
+}
+
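+/* Generic private-flag toggle: do nothing when the requested bit
+ * already matches the cached pflags, otherwise run the handler and
+ * commit the bit only on success.
+ */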
+static int mlx5e_handle_pflag(struct net_device *netdev,
+			      u32 wanted_flags,
+			      enum mlx5e_priv_flag flag,
+			      mlx5e_pflag_handler pflag_handler)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool enable = !!(wanted_flags & flag);
+	u32 changes = wanted_flags ^ priv->params.pflags;
+	int err;
+
+	if (!(changes & flag))
+		return 0;
+
+	err = pflag_handler(netdev, enable);
+	if (err) {
+		netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
+			   enable ? "Enable" : "Disable", flag, err);
+		return err;
+	}
+
+	MLX5E_SET_PFLAG(priv, flag, enable);
+	return 0;
+}
+
+static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+	err = mlx5e_handle_pflag(netdev, pflags,
+				 MLX5E_PFLAG_RX_CQE_BASED_MODER,
+				 set_pflag_rx_cqe_based_moder);
+	if (err)
+		goto out;
+
+	err = mlx5e_handle_pflag(netdev, pflags,
+				 MLX5E_PFLAG_RX_CQE_COMPRESS,
+				 set_pflag_rx_cqe_compress);
+
+out:
+	mutex_unlock(&priv->state_lock);
+	return err;
+}
+
+static u32 mlx5e_get_priv_flags(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return priv->params.pflags;
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+	int err = 0;
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (cmd->cmd) {
+	case ETHTOOL_SRXCLSRLINS:
+		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+		break;
+	case ETHTOOL_SRXCLSRLDEL:
+		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
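+/* Wires the handlers above into the ethtool core; e.g. "ethtool -C"
+ * reaches mlx5e_set_coalesce and "ethtool --set-priv-flags" reaches
+ * mlx5e_set_priv_flags.
+ */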
+const struct ethtool_ops mlx5e_ethtool_ops = {
+	.get_drvinfo       = mlx5e_get_drvinfo,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = mlx5e_get_strings,
+	.get_sset_count    = mlx5e_get_sset_count,
+	.get_ethtool_stats = mlx5e_get_ethtool_stats,
+	.get_ringparam     = mlx5e_get_ringparam,
+	.set_ringparam     = mlx5e_set_ringparam,
+	.get_channels      = mlx5e_get_channels,
+	.set_channels      = mlx5e_set_channels,
+	.get_coalesce      = mlx5e_get_coalesce,
+	.set_coalesce      = mlx5e_set_coalesce,
+	.get_link_ksettings  = mlx5e_get_link_ksettings,
+	.set_link_ksettings  = mlx5e_set_link_ksettings,
+	.get_rxfh_key_size   = mlx5e_get_rxfh_key_size,
+	.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
+	.get_rxfh          = mlx5e_get_rxfh,
+	.set_rxfh          = mlx5e_set_rxfh,
+	.get_rxnfc         = mlx5e_get_rxnfc,
+	.set_rxnfc         = mlx5e_set_rxnfc,
+	.get_tunable       = mlx5e_get_tunable,
+	.set_tunable       = mlx5e_set_tunable,
+	.get_pauseparam    = mlx5e_get_pauseparam,
+	.set_pauseparam    = mlx5e_set_pauseparam,
+	.get_ts_info       = mlx5e_get_ts_info,
+	.set_phys_id       = mlx5e_set_phys_id,
+	.get_wol	   = mlx5e_get_wol,
+	.set_wol	   = mlx5e_set_wol,
+	.get_module_info   = mlx5e_get_module_info,
+	.get_module_eeprom = mlx5e_get_module_eeprom,
+	.get_priv_flags    = mlx5e_get_priv_flags,
+	.set_priv_flags    = mlx5e_set_priv_flags,
+	.self_test         = mlx5e_self_test,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/fs.c b/drivers/net/ethernet/mellanox/mlx5/en/fs.c
new file mode 100644
index 0000000..cd7499c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/fs.c
@@ -0,0 +1,1143 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "lib/fs.h"
+#include "en/en.h"
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_l2_rule *ai);
+
+enum {
+	MLX5E_FULLMATCH = 0,
+	MLX5E_ALLMULTI  = 1,
+	MLX5E_PROMISC   = 2,
+};
+
+enum {
+	MLX5E_UC        = 0,
+	MLX5E_MC_IPV4   = 1,
+	MLX5E_MC_IPV6   = 2,
+	MLX5E_MC_OTHER  = 3,
+};
+
+enum {
+	MLX5E_ACTION_NONE = 0,
+	MLX5E_ACTION_ADD  = 1,
+	MLX5E_ACTION_DEL  = 2,
+};
+
+struct mlx5e_l2_hash_node {
+	struct hlist_node          hlist;
+	u8                         action;
+	struct mlx5e_l2_rule ai;
+};
+
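+/* Hash L2 addresses by their least significant byte into the
+ * MLX5E_L2_ADDR_HASH_SIZE buckets of the netdev_uc/netdev_mc tables.
+ */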
+static inline int mlx5e_hash_l2(u8 *addr)
+{
+	return addr[5];
+}
+
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
+{
+	struct mlx5e_l2_hash_node *hn;
+	int ix = mlx5e_hash_l2(addr);
+	int found = 0;
+
+	hlist_for_each_entry(hn, &hash[ix], hlist)
+		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+			found = 1;
+			break;
+		}
+
+	if (found) {
+		hn->action = MLX5E_ACTION_NONE;
+		return;
+	}
+
+	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+	if (!hn)
+		return;
+
+	ether_addr_copy(hn->ai.addr, addr);
+	hn->action = MLX5E_ACTION_ADD;
+
+	hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
+{
+	hlist_del(&hn->hlist);
+	kfree(hn);
+}
+
+static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
+{
+	struct net_device *ndev = priv->netdev;
+	int max_list_size;
+	int list_size;
+	u16 *vlans;
+	int vlan;
+	int err;
+	int i;
+
+	list_size = 0;
+	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
+		list_size++;
+
+	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
+
+	if (list_size > max_list_size) {
+		netdev_warn(ndev,
+			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
+			    list_size, max_list_size);
+		list_size = max_list_size;
+	}
+
+	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
+	if (!vlans)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+		if (i >= list_size)
+			break;
+		vlans[i++] = vlan;
+	}
+
+	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
+	if (err)
+		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
+			   err);
+
+	kfree(vlans);
+	return err;
+}
+
+enum mlx5e_vlan_rule_type {
+	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+	MLX5E_VLAN_RULE_TYPE_ANY_VID,
+	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+				 enum mlx5e_vlan_rule_type rule_type,
+				 u16 vid, struct mlx5_flow_spec *spec)
+{
+	struct mlx5_flow_act flow_act = {
+		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+		.encap_id = 0,
+	};
+	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_handle **rule_p;
+	int err = 0;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = priv->fs.l2.ft.t;
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
+	switch (rule_type) {
+	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		rule_p = &priv->fs.vlan.untagged_rule;
+		break;
+	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+		rule_p = &priv->fs.vlan.any_vlan_rule;
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+		break;
+	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+				 outer_headers.first_vid);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+			 vid);
+		break;
+	}
+
+	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+
+	if (IS_ERR(*rule_p)) {
+		err = PTR_ERR(*rule_p);
+		*rule_p = NULL;
+		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+	}
+
+	return err;
+}
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+		mlx5e_vport_context_update_vlans(priv);
+
+	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
+
+	kvfree(spec);
+
+	return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+				enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+	switch (rule_type) {
+	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+		if (priv->fs.vlan.untagged_rule) {
+			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
+			priv->fs.vlan.untagged_rule = NULL;
+		}
+		break;
+	case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+		if (priv->fs.vlan.any_vlan_rule) {
+			mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
+			priv->fs.vlan.any_vlan_rule = NULL;
+		}
+		break;
+	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+		mlx5e_vport_context_update_vlans(priv);
+		if (priv->fs.vlan.active_vlans_rule[vid]) {
+			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
+			priv->fs.vlan.active_vlans_rule[vid] = NULL;
+		}
+		mlx5e_vport_context_update_vlans(priv);
+		break;
+	}
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+	if (!priv->fs.vlan.filter_disabled)
+		return;
+
+	priv->fs.vlan.filter_disabled = false;
+	if (priv->netdev->flags & IFF_PROMISC)
+		return;
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+	if (priv->fs.vlan.filter_disabled)
+		return;
+
+	priv->fs.vlan.filter_disabled = true;
+	if (priv->netdev->flags & IFF_PROMISC)
+		return;
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+			  u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	set_bit(vid, priv->fs.vlan.active_vlans);
+
+	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+			   u16 vid)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	clear_bit(vid, priv->fs.vlan.active_vlans);
+
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+	return 0;
+}
+
+static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
+{
+	int i;
+
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+	}
+
+	if (priv->fs.vlan.filter_disabled &&
+	    !(priv->netdev->flags & IFF_PROMISC))
+		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
+{
+	int i;
+
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+	}
+
+	if (priv->fs.vlan.filter_disabled &&
+	    !(priv->netdev->flags & IFF_PROMISC))
+		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
+		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+				    struct mlx5e_l2_hash_node *hn)
+{
+	switch (hn->action) {
+	case MLX5E_ACTION_ADD:
+		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+		hn->action = MLX5E_ACTION_NONE;
+		break;
+
+	case MLX5E_ACTION_DEL:
+		mlx5e_del_l2_flow_rule(priv, &hn->ai);
+		mlx5e_del_l2_from_hash(hn);
+		break;
+	}
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct netdev_hw_addr *ha;
+
+	netif_addr_lock_bh(netdev);
+
+	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+			     priv->netdev->dev_addr);
+
+	netdev_for_each_uc_addr(ha, netdev)
+		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
+
+	netdev_for_each_mc_addr(ha, netdev)
+		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
+
+	netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
+				  u8 addr_array[][ETH_ALEN], int size)
+{
+	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
+	struct net_device *ndev = priv->netdev;
+	struct mlx5e_l2_hash_node *hn;
+	struct hlist_head *addr_list;
+	struct hlist_node *tmp;
+	int i = 0;
+	int hi;
+
+	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+
+	if (is_uc) /* Make sure our own address is pushed first */
+		ether_addr_copy(addr_array[i++], ndev->dev_addr);
+	else if (priv->fs.l2.broadcast_enabled)
+		ether_addr_copy(addr_array[i++], ndev->broadcast);
+
+	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
+		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
+			continue;
+		if (i >= size)
+			break;
+		ether_addr_copy(addr_array[i++], hn->ai.addr);
+	}
+}
+
+static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
+						 int list_type)
+{
+	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
+	struct mlx5e_l2_hash_node *hn;
+	u8 (*addr_array)[ETH_ALEN] = NULL;
+	struct hlist_head *addr_list;
+	struct hlist_node *tmp;
+	int max_size;
+	int size;
+	int err;
+	int hi;
+
+	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
+	max_size = is_uc ?
+		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
+		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
+
+	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
+	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
+		size++;
+
+	if (size > max_size) {
+		netdev_warn(priv->netdev,
+			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
+			    is_uc ? "UC" : "MC", size, max_size);
+		size = max_size;
+	}
+
+	if (size) {
+		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
+		if (!addr_array) {
+			err = -ENOMEM;
+			goto out;
+		}
+		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
+	}
+
+	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
+out:
+	if (err)
+		netdev_err(priv->netdev,
+			   "Failed to modify vport %s list err(%d)\n",
+			   is_uc ? "UC" : "MC", err);
+	kfree(addr_array);
+}
+
+static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
+{
+	struct mlx5e_l2_table *ea = &priv->fs.l2;
+
+	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
+	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
+	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
+				      ea->allmulti_enabled,
+				      ea->promisc_enabled);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct mlx5e_l2_hash_node *hn;
+	struct hlist_node *tmp;
+	int i;
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+		mlx5e_execute_l2_action(priv, hn);
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+		mlx5e_execute_l2_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+	struct mlx5e_l2_hash_node *hn;
+	struct hlist_node *tmp;
+	int i;
+
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+		hn->action = MLX5E_ACTION_DEL;
+	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+		hn->action = MLX5E_ACTION_DEL;
+
+	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+		mlx5e_sync_netdev_addr(priv);
+
+	mlx5e_apply_netdev_addr(priv);
+}
+
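+/* Deferred rx-mode update: diff the current netdev flags against the
+ * cached l2 table state, install or remove the promisc, allmulti and
+ * broadcast rules accordingly, then push the result to the vport
+ * context.
+ */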
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       set_rx_mode_work);
+
+	struct mlx5e_l2_table *ea = &priv->fs.l2;
+	struct net_device *ndev = priv->netdev;
+
+	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
+	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+	bool broadcast_enabled = rx_mode_enable;
+
+	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
+	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
+	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
+	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
+	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
+	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;
+
+	if (enable_promisc) {
+		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+		if (!priv->fs.vlan.filter_disabled)
+			mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
+	}
+	if (enable_allmulti)
+		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+	if (enable_broadcast)
+		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+	mlx5e_handle_netdev_addr(priv);
+
+	if (disable_broadcast)
+		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
+	if (disable_allmulti)
+		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
+	if (disable_promisc) {
+		if (!priv->fs.vlan.filter_disabled)
+			mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+					    0);
+		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
+	}
+
+	ea->promisc_enabled   = promisc_enabled;
+	ea->allmulti_enabled  = allmulti_enabled;
+	ea->broadcast_enabled = broadcast_enabled;
+
+	mlx5e_vport_context_update(priv);
+}
+
+static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
+{
+	int i;
+
+	for (i = ft->num_groups - 1; i >= 0; i--) {
+		if (!IS_ERR_OR_NULL(ft->g[i]))
+			mlx5_destroy_flow_group(ft->g[i]);
+		ft->g[i] = NULL;
+	}
+	ft->num_groups = 0;
+}
+
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
+{
+	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
+}
+
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
+{
+	mlx5e_destroy_groups(ft);
+	kfree(ft->g);
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+			mlx5_del_flow_rules(ttc->rules[i]);
+			ttc->rules[i] = NULL;
+		}
+	}
+}
+
+static struct {
+	u16 etype;
+	u8 proto;
+} ttc_rules[] = {
+	[MLX5E_TT_IPV4_TCP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_TCP,
+	},
+	[MLX5E_TT_IPV6_TCP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_TCP,
+	},
+	[MLX5E_TT_IPV4_UDP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_UDP,
+	},
+	[MLX5E_TT_IPV6_UDP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_UDP,
+	},
+	[MLX5E_TT_IPV4_IPSEC_AH] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_AH,
+	},
+	[MLX5E_TT_IPV6_IPSEC_AH] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_AH,
+	},
+	[MLX5E_TT_IPV4_IPSEC_ESP] = {
+		.etype = ETH_P_IP,
+		.proto = IPPROTO_ESP,
+	},
+	[MLX5E_TT_IPV6_IPSEC_ESP] = {
+		.etype = ETH_P_IPV6,
+		.proto = IPPROTO_ESP,
+	},
+	[MLX5E_TT_IPV4] = {
+		.etype = ETH_P_IP,
+		.proto = 0,
+	},
+	[MLX5E_TT_IPV6] = {
+		.etype = ETH_P_IPV6,
+		.proto = 0,
+	},
+	[MLX5E_TT_ANY] = {
+		.etype = 0,
+		.proto = 0,
+	},
+};
+
+static struct mlx5_flow_handle *
+mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+			struct mlx5_flow_table *ft,
+			struct mlx5_flow_destination *dest,
+			u16 etype,
+			u8 proto)
+{
+	struct mlx5_flow_act flow_act = {
+		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+		.encap_id = 0,
+	};
+	struct mlx5_flow_handle *rule;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (proto) {
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+	}
+	if (etype) {
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+	}
+
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+	}
+
+	kvfree(spec);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+	struct mlx5_flow_destination dest;
+	struct mlx5e_ttc_table *ttc;
+	struct mlx5_flow_handle **rules;
+	struct mlx5_flow_table *ft;
+	int tt;
+	int err;
+
+	ttc = &priv->fs.ttc;
+	ft = ttc->ft.t;
+	rules = ttc->rules;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+		if (tt == MLX5E_TT_ANY)
+			dest.tir_num = priv->direct_tir[0].tirn;
+		else
+			dest.tir_num = priv->indir_tir[tt].tirn;
+		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+						    ttc_rules[tt].etype,
+						    ttc_rules[tt].proto);
+		if (IS_ERR(rules[tt]))
+			goto del_rules;
+	}
+
+	return 0;
+
+del_rules:
+	err = PTR_ERR(rules[tt]);
+	rules[tt] = NULL;
+	mlx5e_cleanup_ttc_rules(ttc);
+	return err;
+}
+
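+/* TTC group sizing: 8 L4 rules (ethertype + ip_protocol), 2 L3 rules
+ * (ethertype only) and a single catch-all entry for MLX5E_TT_ANY.
+ */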
+#define MLX5E_TTC_NUM_GROUPS	3
+#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
+#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
+				 MLX5E_TTC_GROUP2_SIZE +\
+				 MLX5E_TTC_GROUP3_SIZE)
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5e_flow_table *ft = &ttc->ft;
+	int ix = 0;
+	u32 *in;
+	int err;
+	u8 *mc;
+
+	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+			sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g)
+		return -ENOMEM;
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		kfree(ft->g);
+		return -ENOMEM;
+	}
+
+	/* L4 Group */
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_TTC_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	/* L3 Group */
+	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_TTC_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	/* Any Group */
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_TTC_GROUP3_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err;
+	ft->num_groups++;
+
+	kvfree(in);
+	return 0;
+
+err:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+	mlx5e_cleanup_ttc_rules(ttc);
+	mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+	struct mlx5e_flow_table *ft = &ttc->ft;
+	int err;
+
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+
+	err = mlx5e_create_ttc_table_groups(ttc);
+	if (err)
+		goto err;
+
+	err = mlx5e_generate_ttc_table_rules(priv);
+	if (err)
+		goto err;
+
+	return 0;
+err:
+	mlx5e_destroy_flow_table(ft);
+	return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+				   struct mlx5e_l2_rule *ai)
+{
+	if (!IS_ERR_OR_NULL(ai->rule)) {
+		mlx5_del_flow_rules(ai->rule);
+		ai->rule = NULL;
+	}
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+				  struct mlx5e_l2_rule *ai, int type)
+{
+	struct mlx5_flow_act flow_act = {
+		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+		.encap_id = 0,
+	};
+	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+	u8 *mc_dmac;
+	u8 *mv_dmac;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			       outer_headers.dmac_47_16);
+	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			       outer_headers.dmac_47_16);
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = priv->fs.ttc.ft.t;
+
+	switch (type) {
+	case MLX5E_FULLMATCH:
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		eth_broadcast_addr(mc_dmac);
+		ether_addr_copy(mv_dmac, ai->addr);
+		break;
+
+	case MLX5E_ALLMULTI:
+		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+		mc_dmac[0] = 0x01;
+		mv_dmac[0] = 0x01;
+		break;
+
+	case MLX5E_PROMISC:
+		break;
+	}
+
+	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+	if (IS_ERR(ai->rule)) {
+		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+			   __func__, mv_dmac);
+		err = PTR_ERR(ai->rule);
+		ai->rule = NULL;
+	}
+
+	kvfree(spec);
+
+	return err;
+}
+
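+/* L2 table layout: one promiscuous entry, 32K full-match DMAC entries
+ * and one allmulti entry.
+ */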
+#define MLX5E_NUM_L2_GROUPS	   3
+#define MLX5E_L2_GROUP1_SIZE	   BIT(0)
+#define MLX5E_L2_GROUP2_SIZE	   BIT(15)
+#define MLX5E_L2_GROUP3_SIZE	   BIT(0)
+#define MLX5E_L2_TABLE_SIZE	   (MLX5E_L2_GROUP1_SIZE +\
+				    MLX5E_L2_GROUP2_SIZE +\
+				    MLX5E_L2_GROUP3_SIZE)
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5e_flow_table *ft = &l2_table->ft;
+	int ix = 0;
+	u8 *mc_dmac;
+	u32 *in;
+	int err;
+	u8 *mc;
+
+	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g)
+		return -ENOMEM;
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		kfree(ft->g);
+		return -ENOMEM;
+	}
+
+	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+			       outer_headers.dmac_47_16);
+	/* Flow Group for promiscuous */
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_L2_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	/* Flow Group for full match */
+	eth_broadcast_addr(mc_dmac);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_L2_GROUP2_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	/* Flow Group for allmulti */
+	eth_zero_addr(mc_dmac);
+	mc_dmac[0] = 0x01;
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_L2_GROUP3_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	kvfree(in);
+	return 0;
+
+err_destroy_groups:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	mlx5e_destroy_groups(ft);
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
+{
+	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
+}
+
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+	struct mlx5e_flow_table *ft = &l2_table->ft;
+	int err;
+
+	ft->num_groups = 0;
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);
+
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+
+	err = mlx5e_create_l2_table_groups(l2_table);
+	if (err)
+		goto err_destroy_flow_table;
+
+	return 0;
+
+err_destroy_flow_table:
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+
+	return err;
+}
+
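+/* VLAN table layout: 4K entries for per-VID matches (group 0) plus two
+ * entries for the untagged and any-VID rules (group 1).
+ */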
+#define MLX5E_NUM_VLAN_GROUPS	2
+#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
+#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
+#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
+				 MLX5E_VLAN_GROUP1_SIZE)
+
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+					    int inlen)
+{
+	int err;
+	int ix = 0;
+	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_VLAN_GROUP0_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	memset(in, 0, inlen);
+	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+	MLX5_SET_CFG(in, start_flow_index, ix);
+	ix += MLX5E_VLAN_GROUP1_SIZE;
+	MLX5_SET_CFG(in, end_flow_index, ix - 1);
+	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+	if (IS_ERR(ft->g[ft->num_groups]))
+		goto err_destroy_groups;
+	ft->num_groups++;
+
+	return 0;
+
+err_destroy_groups:
+	err = PTR_ERR(ft->g[ft->num_groups]);
+	ft->g[ft->num_groups] = NULL;
+	mlx5e_destroy_groups(ft);
+
+	return err;
+}
+
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
+{
+	u32 *in;
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
+{
+	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
+	int err;
+
+	ft->num_groups = 0;
+	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);
+
+	if (IS_ERR(ft->t)) {
+		err = PTR_ERR(ft->t);
+		ft->t = NULL;
+		return err;
+	}
+	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+	if (!ft->g) {
+		err = -ENOMEM;
+		goto err_destroy_vlan_table;
+	}
+
+	err = mlx5e_create_vlan_table_groups(ft);
+	if (err)
+		goto err_free_g;
+
+	mlx5e_add_vlan_rules(priv);
+
+	return 0;
+
+err_free_g:
+	kfree(ft->g);
+err_destroy_vlan_table:
+	mlx5_destroy_flow_table(ft->t);
+	ft->t = NULL;
+
+	return err;
+}
+
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
+{
+	mlx5e_del_vlan_rules(priv);
+	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
+}
+
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
+{
+	int err;
+
+	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
+					       MLX5_FLOW_NAMESPACE_KERNEL);
+
+	if (!priv->fs.ns)
+		return -EINVAL;
+
+	err = mlx5e_arfs_create_tables(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+			   err);
+		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+	}
+
+	err = mlx5e_create_ttc_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+			   err);
+		goto err_destroy_arfs_tables;
+	}
+
+	err = mlx5e_create_l2_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+			   err);
+		goto err_destroy_ttc_table;
+	}
+
+	err = mlx5e_create_vlan_table(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+			   err);
+		goto err_destroy_l2_table;
+	}
+
+	mlx5e_ethtool_init_steering(priv);
+
+	return 0;
+
+err_destroy_l2_table:
+	mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+	mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+	mlx5e_arfs_destroy_tables(priv);
+
+	return err;
+}
+
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
+{
+	mlx5e_destroy_vlan_table(priv);
+	mlx5e_destroy_l2_table(priv);
+	mlx5e_destroy_ttc_table(priv);
+	mlx5e_arfs_destroy_tables(priv);
+	mlx5e_ethtool_cleanup_steering(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/en/fs_ethtool.c
new file mode 100644
index 0000000..4459f1c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/fs_ethtool.c
@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/fs.h"
+#include "en/en.h"
+
+struct mlx5e_ethtool_rule {
+	struct list_head             list;
+	struct ethtool_rx_flow_spec  flow_spec;
+	struct mlx5_flow_handle	     *rule;
+	struct mlx5e_ethtool_table   *eth_ft;
+};
+
+static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
+{
+	if (!--eth_ft->num_rules) {
+		mlx5_destroy_flow_table(eth_ft->ft);
+		eth_ft->ft = NULL;
+	}
+}
+
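+/* Ethtool steering priorities: L3/L4 tables occupy the lowest range,
+ * and rules with more match tuples land in a lower (earlier) priority
+ * so the most specific flow tables are evaluated first.
+ */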
+#define MLX5E_ETHTOOL_L3_L4_PRIO 0
+#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
+#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
+#define MLX5E_ETHTOOL_NUM_GROUPS  10
+static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
+						  struct ethtool_rx_flow_spec *fs,
+						  int num_tuples)
+{
+	struct mlx5e_ethtool_table *eth_ft;
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *ft;
+	int max_tuples;
+	int table_size;
+	int prio;
+
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+		break;
+	case IP_USER_FLOW:
+		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+		eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+		break;
+	case ETHER_FLOW:
+		max_tuples = ETHTOOL_NUM_L2_FTS;
+		prio = max_tuples - num_tuples;
+		eth_ft = &priv->fs.ethtool.l2_ft[prio];
+		prio += MLX5E_ETHTOOL_L2_PRIO;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	eth_ft->num_rules++;
+	if (eth_ft->ft)
+		return eth_ft;
+
+	ns = mlx5_get_flow_namespace(priv->mdev,
+				     MLX5_FLOW_NAMESPACE_ETHTOOL);
+	if (!ns)
+		return ERR_PTR(-ENOTSUPP);
+
+	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
+						       flow_table_properties_nic_receive.log_max_ft_size)),
+			   MLX5E_ETHTOOL_NUM_ENTRIES);
+	ft = mlx5_create_auto_grouped_flow_table(ns, prio,
+						 table_size,
+						 MLX5E_ETHTOOL_NUM_GROUPS, 0, 0);
+	if (IS_ERR(ft))
+		return ERR_CAST(ft);
+
+	eth_ft->ft = ft;
+	return eth_ft;
+}
+
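+/* Clear any value bits the user did not mask so unmasked bits cannot
+ * leak into the flow-table match value.
+ */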
+static void mask_spec(u8 *mask, u8 *val, size_t size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i++, mask++, val++)
+		*val &= *mask;
+}
+
+static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
+		    __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+{
+	if (ip4src_m) {
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       &ip4src_v, sizeof(ip4src_v));
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       0xff, sizeof(ip4src_m));
+	}
+	if (ip4dst_m) {
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       &ip4dst_v, sizeof(ip4dst_v));
+		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       0xff, sizeof(ip4dst_m));
+	}
+	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+		 ethertype, ETH_P_IP);
+	MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+		 ethertype, 0xffff);
+}
+
+static int set_flow_attrs(u32 *match_c, u32 *match_v,
+			  struct ethtool_rx_flow_spec *fs)
+{
+	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+					     outer_headers);
+	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+					     outer_headers);
+	u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+	struct ethtool_tcpip4_spec *l4_mask;
+	struct ethtool_tcpip4_spec *l4_val;
+	struct ethtool_usrip4_spec *l3_mask;
+	struct ethtool_usrip4_spec *l3_val;
+	struct ethhdr *eth_val;
+	struct ethhdr *eth_mask;
+
+	switch (flow_type) {
+	case TCP_V4_FLOW:
+		l4_mask = &fs->m_u.tcp_ip4_spec;
+		l4_val = &fs->h_u.tcp_ip4_spec;
+		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+		if (l4_mask->psrc) {
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+				 0xffff);
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+				 ntohs(l4_val->psrc));
+		}
+		if (l4_mask->pdst) {
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+				 0xffff);
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+				 ntohs(l4_val->pdst));
+		}
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+			 0xffff);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+			 IPPROTO_TCP);
+		break;
+	case UDP_V4_FLOW:
+		l4_mask = &fs->m_u.tcp_ip4_spec;
+		l4_val = &fs->h_u.tcp_ip4_spec;
+		set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+			l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+		if (l4_mask->psrc) {
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+				 0xffff);
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+				 ntohs(l4_val->psrc));
+		}
+		if (l4_mask->pdst) {
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+				 0xffff);
+			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+				 ntohs(l4_val->pdst));
+		}
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+			 0xffff);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+			 IPPROTO_UDP);
+		break;
+	case IP_USER_FLOW:
+		l3_mask = &fs->m_u.usr_ip4_spec;
+		l3_val = &fs->h_u.usr_ip4_spec;
+		set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
+			l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+		break;
+	case ETHER_FLOW:
+		eth_mask = &fs->m_u.ether_spec;
+		eth_val = &fs->h_u.ether_spec;
+
+		mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_c, smac_47_16),
+				eth_mask->h_source);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_v, smac_47_16),
+				eth_val->h_source);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_c, dmac_47_16),
+				eth_mask->h_dest);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_v, dmac_47_16),
+				eth_val->h_dest);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
+			 ntohs(eth_mask->h_proto));
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
+			 ntohs(eth_val->h_proto));
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((fs->flow_type & FLOW_EXT) &&
+	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			 vlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			 vlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+			 first_vid, 0xfff);
+		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+			 first_vid, ntohs(fs->h_ext.vlan_tci));
+	}
+	if ((fs->flow_type & FLOW_MAC_EXT) &&
+	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_c, dmac_47_16),
+				fs->m_ext.h_dest);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+					     outer_headers_v, dmac_47_16),
+				fs->h_ext.h_dest);
+	}
+
+	return 0;
+}
+
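+/* Insert the rule so that the list stays sorted by ascending location. */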
+static void add_rule_to_list(struct mlx5e_priv *priv,
+			     struct mlx5e_ethtool_rule *rule)
+{
+	struct mlx5e_ethtool_rule *iter;
+	struct list_head *head = &priv->fs.ethtool.rules;
+
+	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+		if (iter->flow_spec.location > rule->flow_spec.location)
+			break;
+		head = &iter->list;
+	}
+	priv->fs.ethtool.tot_num_rules++;
+	list_add(&rule->list, head);
+}
+
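+/* The buffer is all-zero iff its first byte is zero and every byte
+ * equals the one after it, hence the overlapping memcmp below.
+ */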
+static bool outer_header_zero(u32 *match_criteria)
+{
+	int size = MLX5_ST_SZ_BYTES(fte_match_param);
+	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+					     outer_headers);
+
+	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+						  outer_headers_c + 1,
+						  size - 1);
+}
+
+static struct mlx5_flow_handle *
+add_ethtool_flow_rule(struct mlx5e_priv *priv,
+		      struct mlx5_flow_table *ft,
+		      struct ethtool_rx_flow_spec *fs)
+{
+	struct mlx5_flow_destination *dst = NULL;
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_spec *spec;
+	struct mlx5_flow_handle *rule;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec)
+		return ERR_PTR(-ENOMEM);
+	err = set_flow_attrs(spec->match_criteria, spec->match_value,
+			     fs);
+	if (err)
+		goto free;
+
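+	/* RX_CLS_FLOW_DISC means drop the flow; any other cookie is an
+	 * RX ring index, mapped to the corresponding direct TIR.
+	 */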
+	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	} else {
+		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+		if (!dst) {
+			err = -ENOMEM;
+			goto free;
+		}
+
+		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+		dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	}
+
+	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+	flow_act.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+			   __func__, err);
+		goto free;
+	}
+free:
+	kvfree(spec);
+	kfree(dst);
+	return err ? ERR_PTR(err) : rule;
+}
+
+static void del_ethtool_rule(struct mlx5e_priv *priv,
+			     struct mlx5e_ethtool_rule *eth_rule)
+{
+	if (eth_rule->rule)
+		mlx5_del_flow_rules(eth_rule->rule);
+	list_del(&eth_rule->list);
+	priv->fs.ethtool.tot_num_rules--;
+	put_flow_table(eth_rule->eth_ft);
+	kfree(eth_rule);
+}
+
+static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
+						    int location)
+{
+	struct mlx5e_ethtool_rule *iter;
+
+	list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+		if (iter->flow_spec.location == location)
+			return iter;
+	}
+	return NULL;
+}
+
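+/* "Replace" semantics: if a rule already occupies this location,
+ * remove it before allocating the new entry.
+ */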
+static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
+						   int location)
+{
+	struct mlx5e_ethtool_rule *eth_rule;
+
+	eth_rule = find_ethtool_rule(priv, location);
+	if (eth_rule)
+		del_ethtool_rule(priv, eth_rule);
+
+	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+	if (!eth_rule)
+		return ERR_PTR(-ENOMEM);
+
+	add_rule_to_list(priv, eth_rule);
+	return eth_rule;
+}
+
+#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
+
+#define all_ones(field) ((field) == (__force typeof(field))-1)
+#define all_zeros_or_all_ones(field)		\
+	((field) == 0 || (field) == (__force typeof(field))-1)
+
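+/* Returns the number of match tuples on success, or a negative errno
+ * if the flow spec cannot be offloaded.
+ */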
+static int validate_flow(struct mlx5e_priv *priv,
+			 struct ethtool_rx_flow_spec *fs)
+{
+	struct ethtool_tcpip4_spec *l4_mask;
+	struct ethtool_usrip4_spec *l3_mask;
+	struct ethhdr *eth_mask;
+	int num_tuples = 0;
+
+	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
+		return -EINVAL;
+
+	if (fs->ring_cookie >= priv->params.num_channels &&
+	    fs->ring_cookie != RX_CLS_FLOW_DISC)
+		return -EINVAL;
+
+	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+	case ETHER_FLOW:
+		eth_mask = &fs->m_u.ether_spec;
+		if (!is_zero_ether_addr(eth_mask->h_dest))
+			num_tuples++;
+		if (!is_zero_ether_addr(eth_mask->h_source))
+			num_tuples++;
+		if (eth_mask->h_proto)
+			num_tuples++;
+		break;
+	case TCP_V4_FLOW:
+	case UDP_V4_FLOW:
+		if (fs->m_u.tcp_ip4_spec.tos)
+			return -EINVAL;
+		l4_mask = &fs->m_u.tcp_ip4_spec;
+		if (l4_mask->ip4src) {
+			if (!all_ones(l4_mask->ip4src))
+				return -EINVAL;
+			num_tuples++;
+		}
+		if (l4_mask->ip4dst) {
+			if (!all_ones(l4_mask->ip4dst))
+				return -EINVAL;
+			num_tuples++;
+		}
+		if (l4_mask->psrc) {
+			if (!all_ones(l4_mask->psrc))
+				return -EINVAL;
+			num_tuples++;
+		}
+		if (l4_mask->pdst) {
+			if (!all_ones(l4_mask->pdst))
+				return -EINVAL;
+			num_tuples++;
+		}
+		/* Flow is TCP/UDP */
+		num_tuples++;
+		break;
+	case IP_USER_FLOW:
+		l3_mask = &fs->m_u.usr_ip4_spec;
+		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+		    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+			return -EINVAL;
+		if (l3_mask->ip4src) {
+			if (!all_ones(l3_mask->ip4src))
+				return -EINVAL;
+			num_tuples++;
+		}
+		if (l3_mask->ip4dst) {
+			if (!all_ones(l3_mask->ip4dst))
+				return -EINVAL;
+			num_tuples++;
+		}
+		/* Flow is IPv4 */
+		num_tuples++;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (fs->flow_type & FLOW_EXT) {
+		if (fs->m_ext.vlan_etype ||
+		    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
+			return -EINVAL;
+
+		/* the mask was validated to be exactly VLAN_VID_MASK above */
+		if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+			return -EINVAL;
+
+		num_tuples++;
+	}
+
+	if ((fs->flow_type & FLOW_MAC_EXT) &&
+	    !is_zero_ether_addr(fs->m_ext.h_dest))
+		num_tuples++;
+
+	return num_tuples;
+}
+
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+			       struct ethtool_rx_flow_spec *fs)
+{
+	struct mlx5e_ethtool_table *eth_ft;
+	struct mlx5e_ethtool_rule *eth_rule;
+	struct mlx5_flow_handle *rule;
+	int num_tuples;
+	int err;
+
+	num_tuples = validate_flow(priv, fs);
+	if (num_tuples <= 0) {
+		netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
+		return -EINVAL;
+	}
+
+	eth_ft = get_flow_table(priv, fs, num_tuples);
+	if (IS_ERR(eth_ft))
+		return PTR_ERR(eth_ft);
+
+	eth_rule = get_ethtool_rule(priv, fs->location);
+	if (IS_ERR(eth_rule)) {
+		put_flow_table(eth_ft);
+		return PTR_ERR(eth_rule);
+	}
+
+	eth_rule->flow_spec = *fs;
+	eth_rule->eth_ft = eth_ft;
+	if (!eth_ft->ft) {
+		err = -EINVAL;
+		goto del_ethtool_rule;
+	}
+	rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
+	if (IS_ERR(rule)) {
+		err = PTR_ERR(rule);
+		goto del_ethtool_rule;
+	}
+
+	eth_rule->rule = rule;
+
+	return 0;
+
+del_ethtool_rule:
+	del_ethtool_rule(priv, eth_rule);
+
+	return err;
+}
+
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+			      int location)
+{
+	struct mlx5e_ethtool_rule *eth_rule;
+	int err = 0;
+
+	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
+		return -ENOSPC;
+
+	eth_rule = find_ethtool_rule(priv, location);
+	if (!eth_rule) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	del_ethtool_rule(priv, eth_rule);
+out:
+	return err;
+}
+
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+			   int location)
+{
+	struct mlx5e_ethtool_rule *eth_rule;
+
+	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
+		return -EINVAL;
+
+	list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+		if (eth_rule->flow_spec.location == location) {
+			info->fs = eth_rule->flow_spec;
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+				u32 *rule_locs)
+{
+	int location = 0;
+	int idx = 0;
+	int err = 0;
+
+	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+		err = mlx5e_ethtool_get_flow(priv, info, location);
+		if (!err)
+			rule_locs[idx++] = location;
+		location++;
+	}
+	return err;
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+{
+	struct mlx5e_ethtool_rule *iter;
+	struct mlx5e_ethtool_rule *temp;
+
+	list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+		del_ethtool_rule(priv, iter);
+}
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+{
+	INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/main.c b/drivers/net/ethernet/mellanox/mlx5/en/main.c
new file mode 100644
index 0000000..b537df7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/main.c
@@ -0,0 +1,4125 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/tc_act/tc_gact.h>
+#include <net/pkt_cls.h>
+#include "lib/fs.h"
+#include <net/vxlan.h>
+#include <linux/bpf.h>
+#include "en/en.h"
+#include "en/tc.h"
+#include "sriov/eswitch.h"
+#include "en/vxlan.h"
+
+struct mlx5e_rq_param {
+	u32			rqc[MLX5_ST_SZ_DW(rqc)];
+	struct mlx5_wq_param	wq;
+	bool			am_enabled;
+};
+
+struct mlx5e_sq_param {
+	u32                        sqc[MLX5_ST_SZ_DW(sqc)];
+	struct mlx5_wq_param       wq;
+	u16                        max_inline;
+	u8                         min_inline_mode;
+	enum mlx5e_sq_type         type;
+};
+
+struct mlx5e_cq_param {
+	u32                        cqc[MLX5_ST_SZ_DW(cqc)];
+	struct mlx5_wq_param       wq;
+	u16                        eq_ix;
+	u8                         cq_period_mode;
+};
+
+struct mlx5e_channel_param {
+	struct mlx5e_rq_param      rq;
+	struct mlx5e_sq_param      sq;
+	struct mlx5e_sq_param      xdp_sq;
+	struct mlx5e_sq_param      icosq;
+	struct mlx5e_cq_param      rx_cq;
+	struct mlx5e_cq_param      tx_cq;
+	struct mlx5e_cq_param      icosq_cq;
+};
+
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+	return MLX5_CAP_GEN(mdev, striding_rq) &&
+		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+		MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
+{
+	priv->params.rq_wq_type = rq_type;
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+		priv->params.mpwqe_log_stride_sz =
+			MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
+			MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+			MLX5_MPWRQ_LOG_STRIDE_SIZE;
+		priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+			priv->params.mpwqe_log_stride_sz;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+	}
+	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+					       BIT(priv->params.log_rq_size));
+
+	mlx5_core_info(priv->mdev,
+		       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+		       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+		       BIT(priv->params.log_rq_size),
+		       BIT(priv->params.mpwqe_log_stride_sz),
+		       MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS));
+}
+
+static void mlx5e_set_rq_priv_params(struct mlx5e_priv *priv)
+{
+	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(priv->mdev) &&
+		    !priv->xdp_prog ?
+		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+		    MLX5_WQ_TYPE_LINKED_LIST;
+	mlx5e_set_rq_type_params(priv, rq_type);
+}
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u8 port_state;
+
+	port_state = mlx5_query_vport_state(mdev,
+		MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+
+	if (port_state == VPORT_STATE_UP) {
+		netdev_info(priv->netdev, "Link up\n");
+		netif_carrier_on(priv->netdev);
+	} else {
+		netdev_info(priv->netdev, "Link down\n");
+		netif_carrier_off(priv->netdev);
+	}
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       update_carrier_work);
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_update_carrier(priv);
+	mutex_unlock(&priv->state_lock);
+}
+
+static void mlx5e_tx_timeout_work(struct work_struct *work)
+{
+	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+					       tx_timeout_work);
+	int err;
+
+	rtnl_lock();
+	mutex_lock(&priv->state_lock);
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		goto unlock;
+	mlx5e_close_locked(priv->netdev);
+	err = mlx5e_open_locked(priv->netdev);
+	if (err)
+		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+			   err);
+unlock:
+	mutex_unlock(&priv->state_lock);
+	rtnl_unlock();
+}
+
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_sq_stats *sq_stats;
+	u64 tx_offload_none = 0;
+	int i, j;
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < priv->params.num_channels; i++) {
+		rq_stats = &priv->channel[i]->rq.stats;
+
+		s->rx_packets	+= rq_stats->packets;
+		s->rx_bytes	+= rq_stats->bytes;
+		s->rx_lro_packets += rq_stats->lro_packets;
+		s->rx_lro_bytes	+= rq_stats->lro_bytes;
+		s->rx_csum_none	+= rq_stats->csum_none;
+		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
+		s->rx_xdp_drop += rq_stats->xdp_drop;
+		s->rx_xdp_tx += rq_stats->xdp_tx;
+		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
+		s->rx_wqe_err   += rq_stats->wqe_err;
+		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
+		s->rx_cache_reuse += rq_stats->cache_reuse;
+		s->rx_cache_full  += rq_stats->cache_full;
+		s->rx_cache_empty += rq_stats->cache_empty;
+		s->rx_cache_busy  += rq_stats->cache_busy;
+
+		for (j = 0; j < priv->params.num_tc; j++) {
+			sq_stats = &priv->channel[i]->sq[j].stats;
+
+			s->tx_packets		+= sq_stats->packets;
+			s->tx_bytes		+= sq_stats->bytes;
+			s->tx_tso_packets	+= sq_stats->tso_packets;
+			s->tx_tso_bytes		+= sq_stats->tso_bytes;
+			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
+			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
+			s->tx_queue_stopped	+= sq_stats->stopped;
+			s->tx_queue_wake	+= sq_stats->wake;
+			s->tx_queue_dropped	+= sq_stats->dropped;
+			s->tx_xmit_more		+= sq_stats->xmit_more;
+			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+			tx_offload_none		+= sq_stats->csum_none;
+		}
+	}
+
+	/* Update calculated offload counters */
+	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
+
+	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+				priv->stats.pport.phy_counters,
+				counter_set.phys_layer_cntrs.link_down_events);
+}
+
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	MLX5_SET(query_vport_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+	MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+	memset(out, 0, outlen);
+	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+	int prio;
+	void *out;
+	u32 *in;
+
+	in = mlx5_vzalloc(sz);
+	if (!in)
+		goto free_out;
+
+	MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+	out = pstats->IEEE_802_3_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+	out = pstats->RFC_2863_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+	out = pstats->RFC_2819_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+	out = pstats->phy_counters;
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+		out = pstats->per_prio_counters[prio];
+		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+		mlx5_core_access_reg(mdev, in, sz, out, sz,
+				     MLX5_REG_PPCNT, 0, 0);
+	}
+
+free_out:
+	kvfree(in);
+}
+
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+	if (!priv->q_counter)
+		return;
+
+	mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+				      &qcnt->rx_out_of_buffer);
+}
+
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+	void *out;
+	u32 *in;
+
+	in = mlx5_vzalloc(sz);
+	if (!in)
+		return;
+
+	out = pcie_stats->pcie_perf_counters;
+	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+	out = pcie_stats->pcie_tas_counters;
+	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
+	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+	kvfree(in);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_update_q_counter(priv);
+	mlx5e_update_vport_counters(priv);
+	mlx5e_update_pport_counters(priv);
+	mlx5e_update_sw_counters(priv);
+	mlx5e_update_pcie_counters(priv);
+}
+
+void mlx5e_update_stats_work(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+					       update_stats_work);
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		priv->profile->update_stats(priv);
+		queue_delayed_work(priv->wq, dwork,
+				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
+	}
+	mutex_unlock(&priv->state_lock);
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+			      enum mlx5_dev_event event, unsigned long param)
+{
+	struct mlx5e_priv *priv = vpriv;
+
+	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
+		return;
+
+	switch (event) {
+	case MLX5_DEV_EVENT_PORT_UP:
+	case MLX5_DEV_EVENT_PORT_DOWN:
+		queue_work(priv->wq, &priv->update_carrier_work);
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
+	synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
+}
+
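+/* HW MTU includes the Ethernet header, a VLAN tag and the FCS; the SW
+ * (netdev) MTU does not.
+ */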
+#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+	 * To avoid copying garbage after the mtt array, we allocate
+	 * a little more.
+	 */
+	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+		     MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
+				       struct mlx5e_umr_wqe *wqe, u16 ix)
+{
+	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
+	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
+	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+				      ds_cnt);
+	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
+	cseg->imm       = rq->mkey_be;
+
+	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+	ucseg->xlt_octowords =
+		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+	ucseg->bsf_octowords =
+		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+	dseg->lkey = sq->mkey_be;
+	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
+				     struct mlx5e_channel *c)
+{
+	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+	int mtt_sz = mlx5e_get_wqe_mtt_sz();
+	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
+	int i;
+
+	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
+				      GFP_KERNEL, cpu_to_node(c->cpu));
+	if (!rq->mpwqe.info)
+		goto err_out;
+
+	/* We allocate more than mtt_sz as we will align the pointer */
+	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
+					cpu_to_node(c->cpu));
+	if (unlikely(!rq->mpwqe.mtt_no_align))
+		goto err_free_wqe_info;
+
+	for (i = 0; i < wq_sz; i++) {
+		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
+					MLX5_UMR_ALIGN);
+		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
+						  PCI_DMA_TODEVICE);
+		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
+			goto err_unmap_mtts;
+
+		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
+	}
+
+	return 0;
+
+err_unmap_mtts:
+	while (--i >= 0) {
+		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
+				 PCI_DMA_TODEVICE);
+	}
+	kfree(rq->mpwqe.mtt_no_align);
+err_free_wqe_info:
+	kfree(rq->mpwqe.info);
+
+err_out:
+	return -ENOMEM;
+}
+
+static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
+{
+	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+	int mtt_sz = mlx5e_get_wqe_mtt_sz();
+	int i;
+
+	for (i = 0; i < wq_sz; i++) {
+		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];
+
+		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
+				 PCI_DMA_TODEVICE);
+	}
+	kfree(rq->mpwqe.mtt_no_align);
+	kfree(rq->mpwqe.info);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv,
+				 u64 npages, u8 page_shift,
+				 struct mlx5_core_mkey *umr_mkey)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+	void *mkc;
+	u32 *in;
+	int err;
+
+	if (!MLX5E_VALID_NUM_MTTS(npages))
+		return -EINVAL;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
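+	/* Create the mkey in "free" state; UMR WQEs posted at runtime
+	 * fill in the MTT translations and take it out of free state.
+	 */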
+	MLX5_SET(mkc, mkc, free, 1);
+	MLX5_SET(mkc, mkc, umr_en, 1);
+	MLX5_SET(mkc, mkc, lw, 1);
+	MLX5_SET(mkc, mkc, lr, 1);
+	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
+
+	MLX5_SET(mkc, mkc, qpn, 0xffffff);
+	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
+	MLX5_SET64(mkc, mkc, len, npages << page_shift);
+	MLX5_SET(mkc, mkc, translations_octword_size,
+		 MLX5_MTT_OCTW(npages));
+	MLX5_SET(mkc, mkc, log_page_size, page_shift);
+
+	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+
+static int mlx5e_create_rq_umr_mkey(struct mlx5e_rq *rq)
+{
+	struct mlx5e_priv *priv = rq->priv;
+	u64 num_mtts = MLX5E_REQUIRED_MTTS(BIT(priv->params.log_rq_size));
+
+	return mlx5e_create_umr_mkey(priv, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
+}
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+			   struct mlx5e_rq_param *param,
+			   struct mlx5e_rq *rq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqc = param->rqc;
+	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	u32 byte_count;
+	u32 frag_sz;
+	int npages;
+	int wq_sz;
+	int err;
+	int i;
+
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+				&rq->wq_ctrl);
+	if (err)
+		return err;
+
+	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+	wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+
+	rq->wq_type = priv->params.rq_wq_type;
+	rq->pdev    = c->pdev;
+	rq->netdev  = c->netdev;
+	rq->tstamp  = &priv->tstamp;
+	rq->channel = c;
+	rq->ix      = c->ix;
+	rq->priv    = c->priv;
+
+	rq->xdp_prog = priv->xdp_prog ? bpf_prog_inc(priv->xdp_prog) : NULL;
+	if (IS_ERR(rq->xdp_prog)) {
+		err = PTR_ERR(rq->xdp_prog);
+		rq->xdp_prog = NULL;
+		goto err_rq_wq_destroy;
+	}
+
+	rq->buff.map_dir = DMA_FROM_DEVICE;
+	if (rq->xdp_prog)
+		rq->buff.map_dir = DMA_BIDIRECTIONAL;
+
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		if (mlx5e_is_vf_vport_rep(priv)) {
+			err = -EINVAL;
+			goto err_rq_wq_destroy;
+		}
+
+		rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+		rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+		rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+
+		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+		byte_count = rq->buff.wqe_sz;
+
+		err = mlx5e_create_rq_umr_mkey(rq);
+		if (err)
+			goto err_rq_wq_destroy;
+		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+
+		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
+		if (err)
+			goto err_destroy_umr_mkey;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		rq->dma_info = kzalloc_node(wq_sz * sizeof(*rq->dma_info),
+					    GFP_KERNEL, cpu_to_node(c->cpu));
+		if (!rq->dma_info) {
+			err = -ENOMEM;
+			goto err_rq_wq_destroy;
+		}
+
+		if (mlx5e_is_vf_vport_rep(priv))
+			rq->handle_rx_cqe = mlx5e_handle_rx_cqe_rep;
+		else
+			rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+
+		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+		rq->buff.wqe_sz = (priv->params.lro_en) ?
+				priv->params.lro_wqe_sz :
+				MLX5E_SW2HW_MTU(priv->netdev->mtu);
+		byte_count = rq->buff.wqe_sz;
+
+		/* calc the required page order */
+		frag_sz = MLX5_RX_HEADROOM +
+			  byte_count /* packet data */ +
+			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		frag_sz = SKB_DATA_ALIGN(frag_sz);
+
+		npages = DIV_ROUND_UP(frag_sz, PAGE_SIZE);
+		rq->buff.page_order = order_base_2(npages);
+
+		byte_count |= MLX5_HW_START_PADDING;
+		rq->mkey_be = c->mkey_be;
+	}
+
+	for (i = 0; i < wq_sz; i++) {
+		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+
+		wqe->data.byte_count = cpu_to_be32(byte_count);
+		wqe->data.lkey = rq->mkey_be;
+	}
+
+	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
+	rq->am.mode = priv->params.rx_cq_period_mode;
+
+	rq->page_cache.head = 0;
+	rq->page_cache.tail = 0;
+
+	return 0;
+
+err_destroy_umr_mkey:
+	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+
+err_rq_wq_destroy:
+	if (rq->xdp_prog)
+		bpf_prog_put(rq->xdp_prog);
+	mlx5_wq_destroy(&rq->wq_ctrl);
+
+	return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+	int i;
+
+	if (rq->xdp_prog)
+		bpf_prog_put(rq->xdp_prog);
+
+	switch (rq->wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		mlx5e_rq_free_mpwqe_info(rq);
+		mlx5_core_destroy_mkey(rq->priv->mdev, &rq->umr_mkey);
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		kfree(rq->dma_info);
+	}
+
+	for (i = rq->page_cache.head; i != rq->page_cache.tail;
+	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
+		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];
+
+		mlx5e_page_release(rq, dma_info, false);
+	}
+	mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+	struct mlx5e_priv *priv = rq->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	void *wq;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+		sizeof(u64) * rq->wq_ctrl.buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+	wq  = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+	MLX5_SET(rqc,  rqc, cqn,		rq->cq.mcq.cqn);
+	MLX5_SET(rqc,  rqc, state,		MLX5_RQC_STATE_RST);
+	MLX5_SET(rqc,  rqc, vsd, priv->params.vlan_strip_disable);
+	MLX5_SET(wq,   wq,  log_wq_pg_sz,	rq->wq_ctrl.buf.page_shift -
+						MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET64(wq, wq,  dbr_addr,		rq->wq_ctrl.db.dma);
+
+	mlx5_fill_page_array(&rq->wq_ctrl.buf,
+			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+				 int next_state)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+	MLX5_SET(rqc, rqc, state, next_state);
+
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
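+/* vsd = VLAN Stripping Disable; toggled at runtime via RQ modify. */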
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *rqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+	MLX5_SET64(modify_rq_in, in, modify_bitmask,
+		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
+	MLX5_SET(rqc, rqc, vsd, vsd);
+	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+	mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
+	struct mlx5e_channel *c = rq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_wq_ll *wq = &rq->wq;
+
+	while (time_before(jiffies, exp_time)) {
+		if (wq->cur_sz >= priv->params.min_rx_wqes)
+			return 0;
+
+		msleep(20);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_ix_be;
+	u16 wqe_ix;
+
+	/* UMR WQE (if in progress) is always at wq->head */
+	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+
+	while (!mlx5_wq_ll_is_empty(wq)) {
+		wqe_ix_be = *wq->tail_next;
+		wqe_ix    = be16_to_cpu(wqe_ix_be);
+		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+		rq->dealloc_wqe(rq, wqe_ix);
+		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+			       &wqe->next.next_wqe_index);
+	}
+}
+
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+			 struct mlx5e_rq_param *param,
+			 struct mlx5e_rq *rq)
+{
+	struct mlx5e_sq *sq = &c->icosq;
+	u16 pi = sq->pc & sq->wq.sz_m1;
+	int err;
+
+	err = mlx5e_create_rq(c, param, rq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_rq(rq, param);
+	if (err)
+		goto err_destroy_rq;
+
+	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+	if (err)
+		goto err_disable_rq;
+
+	if (param->am_enabled)
+		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+
+	sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
+	sq->db.ico_wqe[pi].num_wqebbs = 1;
+	mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
+
+	return 0;
+
+err_disable_rq:
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+	mlx5e_disable_rq(rq);
+err_destroy_rq:
+	mlx5e_destroy_rq(rq);
+
+	return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
+	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+	cancel_work_sync(&rq->am.work);
+
+	mlx5e_disable_rq(rq);
+	mlx5e_free_rx_descs(rq);
+	mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_xdp_db(struct mlx5e_sq *sq)
+{
+	kfree(sq->db.xdp.di);
+	kfree(sq->db.xdp.wqe_info);
+}
+
+static int mlx5e_alloc_sq_xdp_db(struct mlx5e_sq *sq, int numa)
+{
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+	sq->db.xdp.di = kzalloc_node(sizeof(*sq->db.xdp.di) * wq_sz,
+				     GFP_KERNEL, numa);
+	sq->db.xdp.wqe_info = kzalloc_node(sizeof(*sq->db.xdp.wqe_info) * wq_sz,
+					   GFP_KERNEL, numa);
+	if (!sq->db.xdp.di || !sq->db.xdp.wqe_info) {
+		mlx5e_free_sq_xdp_db(sq);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mlx5e_free_sq_ico_db(struct mlx5e_sq *sq)
+{
+	kfree(sq->db.ico_wqe);
+}
+
+static int mlx5e_alloc_sq_ico_db(struct mlx5e_sq *sq, int numa)
+{
+	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
+				      GFP_KERNEL, numa);
+	if (!sq->db.ico_wqe)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void mlx5e_free_sq_txq_db(struct mlx5e_sq *sq)
+{
+	kfree(sq->db.txq.wqe_info);
+	kfree(sq->db.txq.dma_fifo);
+	kfree(sq->db.txq.skb);
+}
+
+static int mlx5e_alloc_sq_txq_db(struct mlx5e_sq *sq, int numa)
+{
+	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+	sq->db.txq.skb = kzalloc_node(wq_sz * sizeof(*sq->db.txq.skb),
+				      GFP_KERNEL, numa);
+	sq->db.txq.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.txq.dma_fifo),
+					   GFP_KERNEL, numa);
+	sq->db.txq.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.txq.wqe_info),
+					   GFP_KERNEL, numa);
+	if (!sq->db.txq.skb || !sq->db.txq.dma_fifo || !sq->db.txq.wqe_info) {
+		mlx5e_free_sq_txq_db(sq);
+		return -ENOMEM;
+	}
+
+	sq->dma_fifo_mask = df_sz - 1;
+
+	return 0;
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+	switch (sq->type) {
+	case MLX5E_SQ_TXQ:
+		mlx5e_free_sq_txq_db(sq);
+		break;
+	case MLX5E_SQ_ICO:
+		mlx5e_free_sq_ico_db(sq);
+		break;
+	case MLX5E_SQ_XDP:
+		mlx5e_free_sq_xdp_db(sq);
+		break;
+	}
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+	switch (sq->type) {
+	case MLX5E_SQ_TXQ:
+		return mlx5e_alloc_sq_txq_db(sq, numa);
+	case MLX5E_SQ_ICO:
+		return mlx5e_alloc_sq_ico_db(sq, numa);
+	case MLX5E_SQ_XDP:
+		return mlx5e_alloc_sq_xdp_db(sq, numa);
+	}
+
+	return 0;
+}
+
+static int mlx5e_sq_get_max_wqebbs(u8 sq_type)
+{
+	switch (sq_type) {
+	case MLX5E_SQ_ICO:
+		return MLX5E_ICOSQ_MAX_WQEBBS;
+	case MLX5E_SQ_XDP:
+		return MLX5E_XDP_TX_WQEBBS;
+	}
+	return MLX5_SEND_WQE_MAX_WQEBBS;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+			   int tc,
+			   struct mlx5e_sq_param *param,
+			   struct mlx5e_sq *sq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *sqc = param->sqc;
+	void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+	int err;
+
+	sq->type      = param->type;
+	sq->pdev      = c->pdev;
+	sq->tstamp    = &priv->tstamp;
+	sq->mkey_be   = c->mkey_be;
+	sq->channel   = c;
+	sq->tc        = tc;
+
+	err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
+	if (err)
+		return err;
+
+	param->wq.db_numa_node = cpu_to_node(c->cpu);
+
+	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+				 &sq->wq_ctrl);
+	if (err)
+		goto err_unmap_free_uar;
+
+	sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
+	if (sq->bfreg.wc)
+		set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
+
+	sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+	sq->max_inline  = param->max_inline;
+	sq->min_inline_mode =
+		MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT ?
+		param->min_inline_mode : 0;
+
+	err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+	if (err)
+		goto err_sq_wq_destroy;
+
+	if (sq->type == MLX5E_SQ_TXQ) {
+		int txq_ix;
+
+		txq_ix = c->ix + tc * priv->params.num_channels;
+		sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+		priv->txq_to_sq_map[txq_ix] = sq;
+	}
+
+	sq->edge = (sq->wq.sz_m1 + 1) - mlx5e_sq_get_max_wqebbs(sq->type);
+	sq->bf_budget = MLX5E_SQ_BF_BUDGET;
+
+	return 0;
+
+err_sq_wq_destroy:
+	mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+	mlx5_free_bfreg(mdev, &sq->bfreg);
+
+	return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+
+	mlx5e_free_sq_db(sq);
+	mlx5_wq_destroy(&sq->wq_ctrl);
+	mlx5_free_bfreg(priv->mdev, &sq->bfreg);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *sqc;
+	void *wq;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+		sizeof(u64) * sq->wq_ctrl.buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+	wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+	MLX5_SET(sqc,  sqc, tis_num_0, param->type == MLX5E_SQ_ICO ?
+				       0 : priv->tisn[sq->tc]);
+	MLX5_SET(sqc,  sqc, cqn,		sq->cq.mcq.cqn);
+	MLX5_SET(sqc,  sqc, min_wqe_inline_mode, sq->min_inline_mode);
+	MLX5_SET(sqc,  sqc, state,		MLX5_SQC_STATE_RST);
+	MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
+
+	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
+	MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
+	MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
+					  MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
+
+	mlx5_fill_page_array(&sq->wq_ctrl.buf,
+			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+	err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
+			   int next_state, bool update_rl, int rl_index)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *sqc;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+	MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+	MLX5_SET(sqc, sqc, state, next_state);
+	if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
+		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, rl_index);
+	}
+
+	err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+	struct mlx5e_channel *c = sq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	mlx5_core_destroy_sq(mdev, sq->sqn);
+	if (sq->rate_limit)
+		mlx5_rl_remove_rate(mdev, sq->rate_limit);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+			 int tc,
+			 struct mlx5e_sq_param *param,
+			 struct mlx5e_sq *sq)
+{
+	int err;
+
+	err = mlx5e_create_sq(c, tc, param, sq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_sq(sq, param);
+	if (err)
+		goto err_destroy_sq;
+
+	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+			      false, 0);
+	if (err)
+		goto err_disable_sq;
+
+	if (sq->txq) {
+		netdev_tx_reset_queue(sq->txq);
+		netif_tx_start_queue(sq->txq);
+	}
+
+	return 0;
+
+err_disable_sq:
+	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+	mlx5e_disable_sq(sq);
+err_destroy_sq:
+	mlx5e_destroy_sq(sq);
+
+	return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+	__netif_tx_lock_bh(txq);
+	netif_tx_stop_queue(txq);
+	__netif_tx_unlock_bh(txq);
+}
+
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+	/* prevent netif_tx_wake_queue */
+	napi_synchronize(&sq->channel->napi);
+
+	if (sq->txq) {
+		netif_tx_disable_queue(sq->txq);
+
+		/* last doorbell out, godspeed .. */
+		if (mlx5e_sq_has_room_for(sq, 1)) {
+			sq->db.txq.skb[(sq->pc & sq->wq.sz_m1)] = NULL;
+			mlx5e_send_nop(sq, true);
+		}
+	}
+
+	mlx5e_disable_sq(sq);
+	mlx5e_free_sq_descs(sq);
+	mlx5e_destroy_sq(sq);
+}
+
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+			   struct mlx5e_cq_param *param,
+			   struct mlx5e_cq *cq)
+{
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	int eqn_not_used;
+	unsigned int irqn;
+	int err;
+	u32 i;
+
+	param->wq.buf_numa_node = cpu_to_node(c->cpu);
+	param->wq.db_numa_node  = cpu_to_node(c->cpu);
+	param->eq_ix   = c->ix;
+
+	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+			       &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+	cq->napi        = &c->napi;
+
+	mcq->cqe_sz     = 64;
+	mcq->set_ci_db  = cq->wq_ctrl.db.db;
+	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+	*mcq->set_ci_db = 0;
+	*mcq->arm_db    = 0;
+	mcq->vector     = param->eq_ix;
+	mcq->comp       = mlx5e_completion_event;
+	mcq->event      = mlx5e_cq_error_event;
+	mcq->irqn       = irqn;
+
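+	/* Mark all CQEs invalid/HW-owned so the poll loop ignores them
+	 * until HW writes real completions.
+	 */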
+	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+		cqe->op_own = 0xf1;
+	}
+
+	cq->channel = c;
+	cq->priv = priv;
+
+	return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+	mlx5_cqwq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+	struct mlx5e_priv *priv = cq->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+
+	void *in;
+	void *cqc;
+	int inlen;
+	unsigned int irqn_not_used;
+	int eqn;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+	memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
+	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
+	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
+	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+					    MLX5_ADAPTER_PAGE_SHIFT);
+	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
+
+	err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+	kvfree(in);
+
+	if (err)
+		return err;
+
+	mlx5e_cq_arm(cq);
+
+	return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_priv *priv = cq->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+			 struct mlx5e_cq_param *param,
+			 struct mlx5e_cq *cq,
+			 struct mlx5e_cq_moder moderation)
+{
+	int err;
+	struct mlx5e_priv *priv = c->priv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	err = mlx5e_create_cq(c, param, cq);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_cq(cq, param);
+	if (err)
+		goto err_destroy_cq;
+
+	if (MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+					       moderation.usec,
+					       moderation.pkts);
+	return 0;
+
+err_destroy_cq:
+	mlx5e_destroy_cq(cq);
+
+	return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+	mlx5e_disable_cq(cq);
+	mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+			     struct mlx5e_channel_param *cparam)
+{
+	struct mlx5e_priv *priv = c->priv;
+	int err;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+				    priv->params.tx_cq_moderation);
+		if (err)
+			goto err_close_tx_cqs;
+	}
+
+	return 0;
+
+err_close_tx_cqs:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_close_cq(&c->sq[tc].cq);
+
+	return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++)
+		mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+			  struct mlx5e_channel_param *cparam)
+{
+	int err;
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++) {
+		err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+		if (err)
+			goto err_close_sqs;
+	}
+
+	return 0;
+
+err_close_sqs:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_close_sq(&c->sq[tc]);
+
+	return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+	int tc;
+
+	for (tc = 0; tc < c->num_tc; tc++)
+		mlx5e_close_sq(&c->sq[tc]);
+}
+
+static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
+{
+	int i;
+
+	for (i = 0; i < priv->profile->max_tc; i++)
+		priv->channeltc_to_txq_map[ix][i] =
+			ix + i * priv->params.num_channels;
+}
+
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+				struct mlx5e_sq *sq, u32 rate)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 rl_index = 0;
+	int err;
+
+	if (rate == sq->rate_limit)
+		/* nothing to do */
+		return 0;
+
+	if (sq->rate_limit)
+		/* remove the current rl index to free space for the next ones */
+		mlx5_rl_remove_rate(mdev, sq->rate_limit);
+
+	sq->rate_limit = 0;
+
+	if (rate) {
+		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+		if (err) {
+			netdev_err(dev, "Failed configuring rate %u: %d\n",
+				   rate, err);
+			return err;
+		}
+	}
+
+	err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+			      MLX5_SQC_STATE_RDY, true, rl_index);
+	if (err) {
+		netdev_err(dev, "Failed configuring rate %u: %d\n",
+			   rate, err);
+		/* remove the rate from the table */
+		if (rate)
+			mlx5_rl_remove_rate(mdev, rate);
+		return err;
+	}
+
+	sq->rate_limit = rate;
+	return 0;
+}
+
+static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+	int err = 0;
+
+	if (!mlx5_rl_is_supported(mdev)) {
+		netdev_err(dev, "Rate limiting is not supported on this device\n");
+		return -EINVAL;
+	}
+
+	/* rate is given in Mbit/sec, HW config is in Kbit/sec;
+	 * the shift by 10 (x1024) approximates the exact x1000 conversion
+	 */
+	rate = rate << 10;
+
+	/* Check whether the rate is in the valid range; 0 is always valid */
+	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
+		netdev_err(dev, "TX rate %u is out of range\n", rate);
+		return -ERANGE;
+	}
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		err = mlx5e_set_sq_maxrate(dev, sq, rate);
+	if (!err)
+		priv->tx_rates[index] = rate;
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+			      struct mlx5e_channel_param *cparam,
+			      struct mlx5e_channel **cp)
+{
+	struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
+	struct net_device *netdev = priv->netdev;
+	struct mlx5e_cq_moder rx_cq_profile;
+	int cpu = mlx5e_get_cpu(priv, ix);
+	struct mlx5e_channel *c;
+	struct mlx5e_sq *sq;
+	int err;
+	int i;
+
+	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+	if (!c)
+		return -ENOMEM;
+
+	c->priv     = priv;
+	c->ix       = ix;
+	c->cpu      = cpu;
+	c->pdev     = &priv->mdev->pdev->dev;
+	c->netdev   = priv->netdev;
+	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
+	c->num_tc   = priv->params.num_tc;
+	c->xdp      = !!priv->xdp_prog;
+
+	if (priv->params.rx_am_enabled)
+		rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
+	else
+		rx_cq_profile = priv->params.rx_cq_moderation;
+
+	mlx5e_build_channeltc_to_txq_map(priv, ix);
+
+	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+	err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
+	if (err)
+		goto err_napi_del;
+
+	err = mlx5e_open_tx_cqs(c, cparam);
+	if (err)
+		goto err_close_icosq_cq;
+
+	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+			    rx_cq_profile);
+	if (err)
+		goto err_close_tx_cqs;
+
+	/* XDP SQ CQ params are the same as normal TXQ SQ CQ params */
+	err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq,
+				     priv->params.tx_cq_moderation) : 0;
+	if (err)
+		goto err_close_rx_cq;
+
+	napi_enable(&c->napi);
+
+	err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
+	if (err)
+		goto err_disable_napi;
+
+	err = mlx5e_open_sqs(c, cparam);
+	if (err)
+		goto err_close_icosq;
+
+	for (i = 0; i < priv->params.num_tc; i++) {
+		u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
+
+		if (priv->tx_rates[txq_ix]) {
+			sq = priv->txq_to_sq_map[txq_ix];
+			mlx5e_set_sq_maxrate(priv->netdev, sq,
+					     priv->tx_rates[txq_ix]);
+		}
+	}
+
+	err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0;
+	if (err)
+		goto err_close_sqs;
+
+	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+	if (err)
+		goto err_close_xdp_sq;
+
+	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+	*cp = c;
+
+	return 0;
+err_close_xdp_sq:
+	if (c->xdp)
+		mlx5e_close_sq(&c->xdp_sq);
+
+err_close_sqs:
+	mlx5e_close_sqs(c);
+
+err_close_icosq:
+	mlx5e_close_sq(&c->icosq);
+
+err_disable_napi:
+	napi_disable(&c->napi);
+	if (c->xdp)
+		mlx5e_close_cq(&c->xdp_sq.cq);
+
+err_close_rx_cq:
+	mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+	mlx5e_close_tx_cqs(c);
+
+err_close_icosq_cq:
+	mlx5e_close_cq(&c->icosq.cq);
+
+err_napi_del:
+	netif_napi_del(&c->napi);
+	kfree(c);
+
+	return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+	mlx5e_close_rq(&c->rq);
+	if (c->xdp)
+		mlx5e_close_sq(&c->xdp_sq);
+	mlx5e_close_sqs(c);
+	mlx5e_close_sq(&c->icosq);
+	napi_disable(&c->napi);
+	if (c->xdp)
+		mlx5e_close_cq(&c->xdp_sq.cq);
+	mlx5e_close_cq(&c->rq.cq);
+	mlx5e_close_tx_cqs(c);
+	mlx5e_close_cq(&c->icosq.cq);
+	netif_napi_del(&c->napi);
+
+	kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+				 struct mlx5e_rq_param *param)
+{
+	void *rqc = param->rqc;
+	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
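+		/* HW encodes these fields as offsets from a base of
+		 * 2^9 strides and 2^6-byte strides, hence the -9 and -6.
+		 */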
+		MLX5_SET(wq, wq, log_wqe_num_of_strides,
+			 priv->params.mpwqe_log_num_strides - 9);
+		MLX5_SET(wq, wq, log_wqe_stride_size,
+			 priv->params.mpwqe_log_stride_sz - 6);
+		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	}
+
+	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+	MLX5_SET(wq, wq, log_wq_sz,        priv->params.log_rq_size);
+	MLX5_SET(wq, wq, pd,               priv->mdev->mlx5e_res.pdn);
+	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
+
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+	param->wq.linear = 1;
+
+	param->am_enabled = priv->params.rx_am_enabled;
+}
+
+static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+{
+	void *rqc = param->rqc;
+	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+	MLX5_SET(wq, wq, log_wq_stride,    ilog2(sizeof(struct mlx5e_rx_wqe)));
+}
+
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+					struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+	MLX5_SET(wq, wq, pd,            priv->mdev->mlx5e_res.pdn);
+
+	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+				 struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+
+	param->max_inline = priv->params.tx_max_inline;
+	param->min_inline_mode = priv->params.tx_min_inline_mode;
+	param->type = MLX5E_SQ_TXQ;
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+					struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+	u8 log_cq_size;
+
+	switch (priv->params.rq_wq_type) {
+	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+		log_cq_size = priv->params.log_rq_size +
+			priv->params.mpwqe_log_num_strides;
+		break;
+	default: /* MLX5_WQ_TYPE_LINKED_LIST */
+		log_cq_size = priv->params.log_rq_size;
+	}
+
+	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+	}
+
+	mlx5e_build_common_cq_param(priv, param);
+
+	param->cq_period_mode = priv->params.rx_cq_period_mode;
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_cq_param *param)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+
+	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+				     struct mlx5e_cq_param *param,
+				     u8 log_wq_size)
+{
+	void *cqc = param->cqc;
+
+	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+	mlx5e_build_common_cq_param(priv, param);
+
+	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_sq_param *param,
+				    u8 log_wq_size)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+
+	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
+	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+	param->type = MLX5E_SQ_ICO;
+}
+
+static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
+				    struct mlx5e_sq_param *param)
+{
+	void *sqc = param->sqc;
+	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+	mlx5e_build_sq_param_common(priv, param);
+	MLX5_SET(wq, wq, log_wq_sz,     priv->params.log_sq_size);
+
+	param->max_inline = priv->params.tx_max_inline;
+	/* For XDP, SQs will support only L2 inline mode */
+	param->min_inline_mode = MLX5_INLINE_MODE_NONE;
+	param->type = MLX5E_SQ_XDP;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
+{
+	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+
+	mlx5e_build_rq_param(priv, &cparam->rq);
+	mlx5e_build_sq_param(priv, &cparam->sq);
+	mlx5e_build_xdpsq_param(priv, &cparam->xdp_sq);
+	mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
+	mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+	mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+	mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+	struct mlx5e_channel_param *cparam;
+	int nch = priv->params.num_channels;
+	int err = -ENOMEM;
+	int i;
+	int j;
+
+	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
+				GFP_KERNEL);
+
+	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
+				      sizeof(struct mlx5e_sq *), GFP_KERNEL);
+
+	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+	if (!priv->channel || !priv->txq_to_sq_map || !cparam)
+		goto err_free_txq_to_sq_map;
+
+	mlx5e_build_channel_param(priv, cparam);
+
+	for (i = 0; i < nch; i++) {
+		err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
+		if (err)
+			goto err_close_channels;
+	}
+
+	for (j = 0; j < nch; j++) {
+		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+		if (err)
+			goto err_close_channels;
+	}
+
+	/* FIXME: This is a workaround for a TX timeout watchdog false
+	 * alarm when polling for inactive tx queues.
+	 */
+	netif_tx_start_all_queues(priv->netdev);
+
+	kfree(cparam);
+	return 0;
+
+err_close_channels:
+	for (i--; i >= 0; i--)
+		mlx5e_close_channel(priv->channel[i]);
+
+err_free_txq_to_sq_map:
+	kfree(priv->txq_to_sq_map);
+	kfree(priv->channel);
+	kfree(cparam);
+
+	return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+	int i;
+
+	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
+	 * polling for inactive tx queues.
+	 */
+	netif_tx_stop_all_queues(priv->netdev);
+	netif_tx_disable(priv->netdev);
+
+	for (i = 0; i < priv->params.num_channels; i++)
+		mlx5e_close_channel(priv->channel[i]);
+
+	kfree(priv->txq_to_sq_map);
+	kfree(priv->channel);
+}
+
+static int mlx5e_rx_hash_fn(int hfunc)
+{
+	return (hfunc == ETH_RSS_HASH_TOP) ?
+	       MLX5_RX_HASH_FN_TOEPLITZ :
+	       MLX5_RX_HASH_FN_INVERTED_XOR8;
+}
+
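+/* Reverse the @size least significant bits of @a; used below to
+ * permute RSS indirection table indices when the inverted-XOR8 hash
+ * function is selected.
+ */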
+static int mlx5e_bits_invert(unsigned long a, int size)
+{
+	int inv = 0;
+	int i;
+
+	for (i = 0; i < size; i++)
+		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
+
+	return inv;
+}
+
+static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
+		int ix = i;
+		u32 rqn;
+
+		if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
+			ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
+
+		ix = priv->params.indirection_rqt[ix];
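+		/* While the netdev is closed, point the entry at the
+		 * drop RQ so the table always references a valid RQN.
+		 */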
+		rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+				priv->channel[ix]->rq.rqn :
+				priv->drop_rq.rqn;
+		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
+	}
+}
+
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+				      int ix)
+{
+	u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+			priv->channel[ix]->rq.rqn :
+			priv->drop_rq.rqn;
+
+	MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
+}
+
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
+			    int ix, struct mlx5e_rqt *rqt)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqtc;
+	int inlen;
+	int err;
+	u32 *in;
+
+	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+	if (sz > 1) /* RSS */
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+	else
+		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
+
+	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
+	if (!err)
+		rqt->enabled = true;
+
+	kvfree(in);
+	return err;
+}
+
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
+{
+	rqt->enabled = false;
+	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
+}
+
+static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
+{
+	struct mlx5e_rqt *rqt = &priv->indir_rqt;
+
+	return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+}
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
+{
+	struct mlx5e_rqt *rqt;
+	int err;
+	int ix;
+
+	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+		rqt = &priv->direct_tir[ix].rqt;
+		err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqt);
+		if (err)
+			goto err_destroy_rqts;
+	}
+
+	return 0;
+
+err_destroy_rqts:
+	for (ix--; ix >= 0; ix--)
+		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
+
+	return err;
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqtc;
+	int inlen;
+	u32 *in;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
+
+	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+	if (sz > 1) /* RSS */
+		mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+	else
+		mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
+
+	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
+
+	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
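+/* Repoint every enabled RQT at the current RQs: live channel RQs when
+ * the device is open, the drop RQ otherwise.
+ */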
+static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
+{
+	u32 rqtn;
+	int ix;
+
+	if (priv->indir_rqt.enabled) {
+		rqtn = priv->indir_rqt.rqtn;
+		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+	}
+
+	for (ix = 0; ix < priv->params.num_channels; ix++) {
+		if (!priv->direct_tir[ix].rqt.enabled)
+			continue;
+		rqtn = priv->direct_tir[ix].rqt.rqtn;
+		mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+	}
+}
+
+static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
+{
+	if (!priv->params.lro_en)
+		return;
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+	MLX5_SET(tirc, tirc, lro_enable_mask,
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
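+	/* lro_max_ip_payload_size is in 256-byte units, hence the
+	 * shift; a rough L2+L3 header estimate is subtracted first.
+	 */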
+	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+		 (priv->params.lro_wqe_sz -
+		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
+}
+
+void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+{
+	MLX5_SET(tirc, tirc, rx_hash_fn,
+		 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
+	if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
+		void *rss_key = MLX5_ADDR_OF(tirc, tirc,
+					     rx_hash_toeplitz_key);
+		size_t len = MLX5_FLD_SZ_BYTES(tirc,
+					       rx_hash_toeplitz_key);
+
+		MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+		memcpy(rss_key, priv->params.toeplitz_hash_key, len);
+	}
+}
+
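+/* Propagate the current LRO configuration to all TIRs, indirect and
+ * direct, one MODIFY_TIR command each.
+ */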
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	void *in;
+	void *tirc;
+	int inlen;
+	int err;
+	int tt;
+	int ix;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
+	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
+					   inlen);
+		if (err)
+			goto free_in;
+	}
+
+	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+					   in, inlen);
+		if (err)
+			goto free_in;
+	}
+
+free_in:
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
+	int err;
+
+	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
+	if (err)
+		return err;
+
+	/* Update vport context MTU */
+	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+	return 0;
+}
+
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u16 hw_mtu = 0;
+	int err;
+
+	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+	if (err || !hw_mtu) /* fallback to port oper mtu */
+		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+	*mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u16 mtu;
+	int err;
+
+	err = mlx5e_set_mtu(priv, netdev->mtu);
+	if (err)
+		return err;
+
+	mlx5e_query_mtu(priv, &mtu);
+	if (mtu != netdev->mtu)
+		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+			    __func__, mtu, netdev->mtu);
+
+	netdev->mtu = mtu;
+	return 0;
+}
+
+static void mlx5e_netdev_set_tcs(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int nch = priv->params.num_channels;
+	int ntc = priv->params.num_tc;
+	int tc;
+
+	netdev_reset_tc(netdev);
+
+	if (ntc == 1)
+		return;
+
+	netdev_set_num_tc(netdev, ntc);
+
+	/* Map netdev TCs to offset 0
+	 * We have our own UP to TXQ mapping for QoS
+	 */
+	for (tc = 0; tc < ntc; tc++)
+		netdev_set_tc_queue(netdev, tc, nch, 0);
+}
+
+int mlx5e_open_locked(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int num_txqs;
+	int err;
+
+	set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+	mlx5e_netdev_set_tcs(netdev);
+
+	num_txqs = priv->params.num_channels * priv->params.num_tc;
+	netif_set_real_num_tx_queues(netdev, num_txqs);
+	netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+	err = mlx5e_open_channels(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+			   __func__, err);
+		goto err_clear_state_opened_flag;
+	}
+
+	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
+	mlx5e_redirect_rqts(priv);
+	mlx5e_update_carrier(priv);
+	mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+	priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
+	if (priv->profile->update_stats)
+		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		err = mlx5e_add_sqs_fwd_rules(priv);
+		if (err)
+			goto err_close_channels;
+	}
+	return 0;
+
+err_close_channels:
+	mlx5e_close_channels(priv);
+err_clear_state_opened_flag:
+	clear_bit(MLX5E_STATE_OPENED, &priv->state);
+	return err;
+}
+
+int mlx5e_open(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+	err = mlx5e_open_locked(netdev);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	/* May already be CLOSED if a previous configuration operation
+	 * (e.g. an RX/TX queue size change) that involves close & open
+	 * failed.
+	 */
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		mlx5e_remove_sqs_fwd_rules(priv);
+
+	mlx5e_timestamp_cleanup(priv);
+	netif_carrier_off(priv->netdev);
+	mlx5e_redirect_rqts(priv);
+	mlx5e_close_channels(priv);
+
+	return 0;
+}
+
+int mlx5e_close(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	if (!netif_device_present(netdev))
+		return -ENODEV;
+
+	mutex_lock(&priv->state_lock);
+	err = mlx5e_close_locked(netdev);
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
+				struct mlx5e_rq *rq,
+				struct mlx5e_rq_param *param)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	void *rqc = param->rqc;
+	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+	int err;
+
+	param->wq.db_numa_node = param->wq.buf_numa_node;
+
+	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+				&rq->wq_ctrl);
+	if (err)
+		return err;
+
+	rq->priv = priv;
+
+	return 0;
+}
+
+static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
+				struct mlx5e_cq *cq,
+				struct mlx5e_cq_param *param)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_core_cq *mcq = &cq->mcq;
+	int eqn_not_used;
+	unsigned int irqn;
+	int err;
+
+	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+			       &cq->wq_ctrl);
+	if (err)
+		return err;
+
+	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+	mcq->cqe_sz     = 64;
+	mcq->set_ci_db  = cq->wq_ctrl.db.db;
+	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
+	*mcq->set_ci_db = 0;
+	*mcq->arm_db    = 0;
+	mcq->vector     = param->eq_ix;
+	mcq->comp       = mlx5e_completion_event;
+	mcq->event      = mlx5e_cq_error_event;
+	mcq->irqn       = irqn;
+
+	cq->priv = priv;
+
+	return 0;
+}
+
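+/* The drop RQ is a minimal RQ with no buffers posted by the driver,
+ * so traffic steered to it is dropped; RQTs point at it whenever the
+ * channel RQs are closed.
+ */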
+static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
+{
+	struct mlx5e_cq_param cq_param;
+	struct mlx5e_rq_param rq_param;
+	struct mlx5e_rq *rq = &priv->drop_rq;
+	struct mlx5e_cq *cq = &priv->drop_rq.cq;
+	int err;
+
+	memset(&cq_param, 0, sizeof(cq_param));
+	memset(&rq_param, 0, sizeof(rq_param));
+	mlx5e_build_drop_rq_param(&rq_param);
+
+	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
+	if (err)
+		return err;
+
+	err = mlx5e_enable_cq(cq, &cq_param);
+	if (err)
+		goto err_destroy_cq;
+
+	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
+	if (err)
+		goto err_disable_cq;
+
+	err = mlx5e_enable_rq(rq, &rq_param);
+	if (err)
+		goto err_destroy_rq;
+
+	return 0;
+
+err_destroy_rq:
+	mlx5e_destroy_rq(&priv->drop_rq);
+
+err_disable_cq:
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+
+err_destroy_cq:
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
+
+	return err;
+}
+
+static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
+{
+	mlx5e_disable_rq(&priv->drop_rq);
+	mlx5e_destroy_rq(&priv->drop_rq);
+	mlx5e_disable_cq(&priv->drop_rq.cq);
+	mlx5e_destroy_cq(&priv->drop_rq.cq);
+}
+
+static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
+	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+	MLX5_SET(tisc, tisc, prio, tc << 1);
+	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
+
+	if (mlx5_lag_is_lacp_owner(mdev))
+		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);
+
+	return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
+{
+	mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+int mlx5e_create_tises(struct mlx5e_priv *priv)
+{
+	int err;
+	int tc;
+
+	for (tc = 0; tc < priv->profile->max_tc; tc++) {
+		err = mlx5e_create_tis(priv, tc);
+		if (err)
+			goto err_close_tises;
+	}
+
+	return 0;
+
+err_close_tises:
+	for (tc--; tc >= 0; tc--)
+		mlx5e_destroy_tis(priv, tc);
+
+	return err;
+}
+
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
+{
+	int tc;
+
+	for (tc = 0; tc < priv->profile->max_tc; tc++)
+		mlx5e_destroy_tis(priv, tc);
+}
+
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+				      enum mlx5e_traffic_types tt)
+{
+	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
+				 MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+				 MLX5_HASH_FIELD_SEL_DST_IP   |\
+				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+	mlx5e_build_tir_ctx_hash(tirc, priv);
+
+	switch (tt) {
+	case MLX5E_TT_IPV4_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_TCP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_TCP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV6_UDP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+			 MLX5_L4_PROT_TYPE_UDP);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_L4PORTS);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_AH:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV6_IPSEC_ESP:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP_IPSEC_SPI);
+		break;
+
+	case MLX5E_TT_IPV4:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV4);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+
+	case MLX5E_TT_IPV6:
+		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+			 MLX5_L3_PROT_TYPE_IPV6);
+		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+			 MLX5_HASH_IP);
+		break;
+	default:
+		WARN_ONCE(true,
+			  "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
+	}
+}
+
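+/* Direct TIRs dispatch through a single-entry RQT, so the RX hash
+ * result does not matter; inverted XOR8 is set as the hash function.
+ */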
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+				       u32 rqtn)
+{
+	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+
+	mlx5e_build_tir_ctx_lro(tirc, priv);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table, rqtn);
+	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tir *tir;
+	void *tirc;
+	int inlen;
+	int err;
+	u32 *in;
+	int tt;
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+		memset(in, 0, inlen);
+		tir = &priv->indir_tir[tt];
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+		if (err)
+			goto err_destroy_tirs;
+	}
+
+	kvfree(in);
+
+	return 0;
+
+err_destroy_tirs:
+	for (tt--; tt >= 0; tt--)
+		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
+
+	kvfree(in);
+
+	return err;
+}
+
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
+{
+	int nch = priv->profile->max_nch(priv->mdev);
+	struct mlx5e_tir *tir;
+	void *tirc;
+	int inlen;
+	int err;
+	u32 *in;
+	int ix;
+
+	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	for (ix = 0; ix < nch; ix++) {
+		memset(in, 0, inlen);
+		tir = &priv->direct_tir[ix];
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_direct_tir_ctx(priv, tirc,
+					   priv->direct_tir[ix].rqt.rqtn);
+		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+		if (err)
+			goto err_destroy_ch_tirs;
+	}
+
+	kvfree(in);
+
+	return 0;
+
+err_destroy_ch_tirs:
+	for (ix--; ix >= 0; ix--)
+		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
+
+	kvfree(in);
+
+	return err;
+}
+
+static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+}
+
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
+{
+	int nch = priv->profile->max_nch(priv->mdev);
+	int i;
+
+	for (i = 0; i < nch; i++)
+		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
+}
+
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+{
+	int err = 0;
+	int i;
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return 0;
+
+	for (i = 0; i < priv->params.num_channels; i++) {
+		err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
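+/* Changing the number of TCs re-partitions the TX queues, so the
+ * device is closed and reopened around the update when it is up.
+ */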
+static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool was_opened;
+	int err = 0;
+
+	if (tc && tc != MLX5E_MAX_NUM_TC)
+		return -EINVAL;
+
+	mutex_lock(&priv->state_lock);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(priv->netdev);
+
+	priv->params.num_tc = tc ? tc : 1;
+
+	if (was_opened)
+		err = mlx5e_open_locked(priv->netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
+			      __be16 proto, struct tc_to_netdev *tc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+		goto mqprio;
+
+	switch (tc->type) {
+	case TC_SETUP_CLSFLOWER:
+		switch (tc->cls_flower->command) {
+		case TC_CLSFLOWER_REPLACE:
+			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
+		case TC_CLSFLOWER_DESTROY:
+			return mlx5e_delete_flower(priv, tc->cls_flower);
+		case TC_CLSFLOWER_STATS:
+			return mlx5e_stats_flower(priv, tc->cls_flower);
+		}
+	default:
+		return -EOPNOTSUPP;
+	}
+
+mqprio:
+	if (tc->type != TC_SETUP_MQPRIO)
+		return -EINVAL;
+
+	return mlx5e_setup_tc(dev, tc->tc);
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+	if (mlx5e_is_uplink_rep(priv)) {
+		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
+		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
+		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
+		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+	} else {
+		stats->rx_packets = sstats->rx_packets;
+		stats->rx_bytes   = sstats->rx_bytes;
+		stats->tx_packets = sstats->tx_packets;
+		stats->tx_bytes   = sstats->tx_bytes;
+		stats->tx_dropped = sstats->tx_queue_dropped;
+	}
+
+	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+
+	stats->rx_length_errors =
+		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+	stats->rx_crc_errors =
+		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+	stats->tx_carrier_errors =
+		PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+			   stats->rx_frame_errors;
+	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+	/* vport multicast also counts packets that are dropped due to
+	 * steering or RX out-of-buffer conditions
+	 */
+	stats->multicast =
+		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
+
+	return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	queue_work(priv->wq, &priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct sockaddr *saddr = addr;
+
+	if (!is_valid_ether_addr(saddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	netif_addr_lock_bh(netdev);
+	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+	netif_addr_unlock_bh(netdev);
+
+	queue_work(priv->wq, &priv->set_rx_mode_work);
+
+	return 0;
+}
+
+#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
+	do {						\
+		if (enable)				\
+			netdev->features |= feature;	\
+		else					\
+			netdev->features &= ~feature;	\
+	} while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+
+	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+		mlx5e_close_locked(priv->netdev);
+
+	priv->params.lro_en = enable;
+	err = mlx5e_modify_tirs_lro(priv);
+	if (err) {
+		netdev_err(netdev, "lro modify failed, %d\n", err);
+		priv->params.lro_en = !enable;
+	}
+
+	if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+		mlx5e_open_locked(priv->netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (enable)
+		mlx5e_enable_vlan_filter(priv);
+	else
+		mlx5e_disable_vlan_filter(priv);
+
+	return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (!enable && mlx5e_tc_num_filters(priv)) {
+		netdev_err(netdev,
+			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	mutex_lock(&priv->state_lock);
+
+	/* FIXME: This is a workaround only for a TX timeout watchdog
+	 * false alarm when polling for inactive tx queues.
+	 */
+		priv->params.vlan_strip_disable = enable;
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	int err;
+
+	if (enable)
+		err = mlx5e_arfs_enable(priv);
+	else
+		err = mlx5e_arfs_disable(priv);
+
+	return err;
+}
+#endif
+
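+/* Apply a single feature bit: call the handler only when the wanted
+ * state differs from the current one, and mirror the result into
+ * netdev->features on success.
+ */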
+static int mlx5e_handle_feature(struct net_device *netdev,
+				netdev_features_t wanted_features,
+				netdev_features_t feature,
+				mlx5e_feature_handler feature_handler)
+{
+	netdev_features_t changes = wanted_features ^ netdev->features;
+	bool enable = !!(wanted_features & feature);
+	int err;
+
+	if (!(changes & feature))
+		return 0;
+
+	err = feature_handler(netdev, enable);
+	if (err) {
+		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+			   enable ? "Enable" : "Disable", feature, err);
+		return err;
+	}
+
+	MLX5E_SET_FEATURE(netdev, feature, enable);
+	return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+			      netdev_features_t features)
+{
+	int err;
+
+	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+				    set_feature_lro);
+	err |= mlx5e_handle_feature(netdev, features,
+				    NETIF_F_HW_VLAN_CTAG_FILTER,
+				    set_feature_vlan_filter);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+				    set_feature_tc_num_filters);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+				    set_feature_rx_all);
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+				    set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+				    set_feature_arfs);
+#endif
+
+	return err ? -EINVAL : 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	bool was_opened;
+	int err = 0;
+	bool reset;
+
+	mutex_lock(&priv->state_lock);
+
+	reset = !priv->params.lro_en &&
+		(priv->params.rq_wq_type !=
+		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened && reset)
+		mlx5e_close_locked(netdev);
+
+	netdev->mtu = new_mtu;
+	mlx5e_set_dev_port_mtu(netdev);
+
+	if (was_opened && reset)
+		err = mlx5e_open_locked(netdev);
+
+	mutex_unlock(&priv->state_lock);
+
+	return err;
+}
+
+static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return mlx5e_hwstamp_set(dev, ifr);
+	case SIOCGHWTSTAMP:
+		return mlx5e_hwstamp_get(dev, ifr);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
+}
+
+static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
+			     __be16 vlan_proto)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (vlan_proto != htons(ETH_P_8021Q))
+		return -EPROTONOSUPPORT;
+
+	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
+					   vlan, qos);
+}
+
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
+			     int max_tx_rate)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	if (min_tx_rate)
+		return -EOPNOTSUPP;
+
+	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
+					   max_tx_rate);
+}
+
+static int mlx5_vport_link2ifla(u8 esw_link)
+{
+	switch (esw_link) {
+	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
+		return IFLA_VF_LINK_STATE_DISABLE;
+	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
+		return IFLA_VF_LINK_STATE_ENABLE;
+	}
+	return IFLA_VF_LINK_STATE_AUTO;
+}
+
+static int mlx5_ifla_link2vport(u8 ifla_link)
+{
+	switch (ifla_link) {
+	case IFLA_VF_LINK_STATE_DISABLE:
+		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
+	case IFLA_VF_LINK_STATE_ENABLE:
+		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
+	}
+	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+}
+
+static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
+				   int link_state)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
+					    mlx5_ifla_link2vport(link_state));
+}
+
+static int mlx5e_get_vf_config(struct net_device *dev,
+			       int vf, struct ifla_vf_info *ivi)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
+	if (err)
+		return err;
+	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
+	return 0;
+}
+
+static int mlx5e_get_vf_stats(struct net_device *dev,
+			      int vf, struct ifla_vf_stats *vf_stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+
+	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
+					    vf_stats);
+}
+
+void mlx5e_add_vxlan_port(struct net_device *netdev,
+			  struct udp_tunnel_info *ti)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	if (!mlx5e_vxlan_allowed(priv->mdev))
+		return;
+
+	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
+}
+
+void mlx5e_del_vxlan_port(struct net_device *netdev,
+			  struct udp_tunnel_info *ti)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+		return;
+
+	if (!mlx5e_vxlan_allowed(priv->mdev))
+		return;
+
+	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
+}
+
+static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
+						    struct sk_buff *skb,
+						    netdev_features_t features)
+{
+	struct udphdr *udph;
+	u16 proto;
+	u16 port = 0;
+
+	switch (vlan_get_protocol(skb)) {
+	case htons(ETH_P_IP):
+		proto = ip_hdr(skb)->protocol;
+		break;
+	case htons(ETH_P_IPV6):
+		proto = ipv6_hdr(skb)->nexthdr;
+		break;
+	default:
+		goto out;
+	}
+
+	if (proto == IPPROTO_UDP) {
+		udph = udp_hdr(skb);
+		port = be16_to_cpu(udph->dest);
+	}
+
+	/* Check whether the UDP port is offloaded by HW */
+	if (port && mlx5e_vxlan_lookup_port(priv, port))
+		return features;
+
+out:
+	/* Disable CSUM and GSO if the UDP dport is not offloaded by HW */
+	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
+static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
+					      struct net_device *netdev,
+					      netdev_features_t features)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	features = vlan_features_check(skb, features);
+	features = vxlan_features_check(skb, features);
+
+	/* Check whether the tunneled packet is offloaded by HW */
+	if (skb->encapsulation &&
+	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
+		return mlx5e_vxlan_features_check(priv, skb, features);
+
+	return features;
+}
+
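+/* ndo_tx_timeout handler: log the stalled SQs, mark them disabled and
+ * kick the recovery work if the device is open.
+ */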
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	bool sched_work = false;
+	int i;
+
+	netdev_err(dev, "TX timeout detected\n");
+
+	for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+		struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+			continue;
+		sched_work = true;
+		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x, SQ Prod: 0x%x\n",
+			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+	}
+
+	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+		schedule_work(&priv->tx_timeout_work);
+}
+
+static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct bpf_prog *old_prog;
+	int err = 0;
+	bool reset, was_opened;
+	int i;
+
+	if (prog && prog->xdp_adjust_head) {
+		netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
+		return -EOPNOTSUPP;
+	}
+
+	mutex_lock(&priv->state_lock);
+
+	if ((netdev->features & NETIF_F_LRO) && prog) {
+		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+		err = -EINVAL;
+		goto unlock;
+	}
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	/* no need for full reset when exchanging programs */
+	reset = (!priv->xdp_prog || !prog);
+
+	if (was_opened && reset)
+		mlx5e_close_locked(netdev);
+	if (was_opened && !reset) {
+		/* num_channels is invariant here, so we can take the
+		 * batched reference right upfront.
+		 */
+		prog = bpf_prog_add(prog, priv->params.num_channels);
+		if (IS_ERR(prog)) {
+			err = PTR_ERR(prog);
+			goto unlock;
+		}
+	}
+
+	/* Exchange programs; the extra prog reference we got from the
+	 * caller is kept as long as we don't fail from this point
+	 * onwards.
+	 */
+	old_prog = xchg(&priv->xdp_prog, prog);
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	if (reset) /* change RQ type according to priv->xdp_prog */
+		mlx5e_set_rq_priv_params(priv);
+
+	if (was_opened && reset)
+		mlx5e_open_locked(netdev);
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
+		goto unlock;
+
+	/* Exchanging programs without a reset; update the ref counts on
+	 * behalf of the channels' RQs here.
+	 */
+	for (i = 0; i < priv->params.num_channels; i++) {
+		struct mlx5e_channel *c = priv->channel[i];
+
+		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+		napi_synchronize(&c->napi);
+		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */
+
+		old_prog = xchg(&c->rq.xdp_prog, prog);
+
+		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
+		/* napi_schedule in case we have missed anything */
+		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+		napi_schedule(&c->napi);
+
+		if (old_prog)
+			bpf_prog_put(old_prog);
+	}
+
+unlock:
+	mutex_unlock(&priv->state_lock);
+	return err;
+}
+
+static bool mlx5e_xdp_attached(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	return !!priv->xdp_prog;
+}
+
+static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+	switch (xdp->command) {
+	case XDP_SETUP_PROG:
+		return mlx5e_xdp_set(dev, xdp->prog);
+	case XDP_QUERY_PROG:
+		xdp->prog_attached = mlx5e_xdp_attached(dev);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Fake "interrupt" called by netpoll (e.g. netconsole) to send skbs
+ * without re-enabling interrupts.
+ */
+static void mlx5e_netpoll(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < priv->params.num_channels; i++)
+		napi_schedule(&priv->channel[i]->napi);
+}
+#endif
+
+static const struct net_device_ops mlx5e_netdev_ops_basic = {
+	.ndo_open                = mlx5e_open,
+	.ndo_stop                = mlx5e_close,
+	.ndo_start_xmit          = mlx5e_xmit,
+	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
+	.ndo_select_queue        = mlx5e_select_queue,
+	.ndo_get_stats64         = mlx5e_get_stats,
+	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
+	.ndo_set_mac_address     = mlx5e_set_mac,
+	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
+	.ndo_set_features        = mlx5e_set_features,
+	.ndo_change_mtu          = mlx5e_change_mtu,
+	.ndo_do_ioctl            = mlx5e_ioctl,
+	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
+#endif
+	.ndo_tx_timeout          = mlx5e_tx_timeout,
+	.ndo_xdp		 = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller     = mlx5e_netpoll,
+#endif
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_sriov = {
+	.ndo_open                = mlx5e_open,
+	.ndo_stop                = mlx5e_close,
+	.ndo_start_xmit          = mlx5e_xmit,
+	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
+	.ndo_select_queue        = mlx5e_select_queue,
+	.ndo_get_stats64         = mlx5e_get_stats,
+	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
+	.ndo_set_mac_address     = mlx5e_set_mac,
+	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
+	.ndo_set_features        = mlx5e_set_features,
+	.ndo_change_mtu          = mlx5e_change_mtu,
+	.ndo_do_ioctl            = mlx5e_ioctl,
+	.ndo_udp_tunnel_add	 = mlx5e_add_vxlan_port,
+	.ndo_udp_tunnel_del	 = mlx5e_del_vxlan_port,
+	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
+	.ndo_features_check      = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+	.ndo_rx_flow_steer	 = mlx5e_rx_flow_steer,
+#endif
+	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
+	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
+	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
+	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
+	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
+	.ndo_get_vf_config       = mlx5e_get_vf_config,
+	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
+	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
+	.ndo_tx_timeout          = mlx5e_tx_timeout,
+	.ndo_xdp		 = mlx5e_xdp,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller     = mlx5e_netpoll,
+#endif
+	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return -ENOTSUPP;
+	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+	    !MLX5_CAP_ETH(mdev, csum_cap) ||
+	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
+	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
+	    MLX5_CAP_FLOWTABLE(mdev,
+			       flow_table_properties_nic_receive.max_ft_level)
+			       < 3) {
+		mlx5_core_warn(mdev,
+			       "Not creating net device, some required device capabilities are missing\n");
+		return -ENOTSUPP;
+	}
+	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+	if (!MLX5_CAP_GEN(mdev, cq_moderation))
+		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
+
+	return 0;
+}
+
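+/* Max inline payload per TX WQE: half a blue-flame register minus the
+ * fixed WQE overhead, plus the two inline_hdr_start bytes that are
+ * counted as part of the inline data.
+ */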
+u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
+{
+	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+	return bf_buf_size -
+	       sizeof(struct mlx5e_tx_wqe) +
+	       2 /* sizeof(mlx5e_tx_wqe.inline_hdr_start) */;
+}
+
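+/* Fill the default RSS indirection table round-robin over the
+ * channels, capped at the number of cores on the device's NUMA node.
+ */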
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+				   u32 *indirection_rqt, int len,
+				   int num_channels)
+{
+	int node = mdev->priv.numa_node;
+	int node_num_of_cores;
+	int i;
+
+	if (node == -1)
+		node = first_online_node;
+
+	node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+	if (node_num_of_cores)
+		num_channels = min_t(int, num_channels, node_num_of_cores);
+
+	for (i = 0; i < len; i++)
+		indirection_rqt[i] = i % num_channels;
+}
+
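+/* Approximate the PCI bus bandwidth in Mb/s as the per-lane signaling
+ * rate times the link width (encoding overhead is ignored).
+ */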
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+	enum pcie_link_width width;
+	enum pci_bus_speed speed;
+	int err = 0;
+
+	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+	if (err)
+		return err;
+
+	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+		return -EINVAL;
+
+	switch (speed) {
+	case PCIE_SPEED_2_5GT:
+		*pci_bw = 2500 * width;
+		break;
+	case PCIE_SPEED_5_0GT:
+		*pci_bw = 5000 * width;
+		break;
+	case PCIE_SPEED_8_0GT:
+		*pci_bw = 8000 * width;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
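+/* Default-enable CQE compression only when the PCIe link looks like
+ * the bottleneck: both rates known, bus bandwidth below 40Gbps and
+ * below the NIC link speed.
+ */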
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+	return (link_speed && pci_bw &&
+		(pci_bw < 40000) && (pci_bw < link_speed));
+}
+
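+/* Set the default RX CQ moderation values for the given period mode;
+ * CQE-based moderation has its own default usec value.
+ */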
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+	params->rx_cq_period_mode = cq_period_mode;
+
+	params->rx_cq_moderation.pkts =
+		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+	params->rx_cq_moderation.usec =
+			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+
+	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+		params->rx_cq_moderation.usec =
+			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+}
+
+static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
+				   u8 *min_inline_mode)
+{
+	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+	case MLX5_CAP_INLINE_MODE_L2:
+		*min_inline_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+		mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
+		break;
+	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+		*min_inline_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	}
+}
+
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+	int i;
+
+	/* The supported periods are organized in ascending order */
+	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+			break;
+
+	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+					struct net_device *netdev,
+					const struct mlx5e_profile *profile,
+					void *ppriv)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u32 link_speed = 0;
+	u32 pci_bw = 0;
+	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+
+	priv->mdev                         = mdev;
+	priv->netdev                       = netdev;
+	priv->params.num_channels          = profile->max_nch(mdev);
+	priv->profile                      = profile;
+	priv->ppriv                        = ppriv;
+
+	priv->params.lro_timeout =
+		mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+
+	priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+
+	/* set CQE compression */
+	priv->params.rx_cqe_compress_def = false;
+	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		mlx5e_get_max_linkspeed(mdev, &link_speed);
+		mlx5e_get_pci_bw(mdev, &pci_bw);
+		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+			      link_speed, pci_bw);
+		priv->params.rx_cqe_compress_def =
+			cqe_compress_heuristic(link_speed, pci_bw);
+	}
+
+	mlx5e_set_rq_priv_params(priv);
+	if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+		priv->params.lro_en = true;
+
+	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+	priv->params.tx_cq_moderation.usec =
+		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+	priv->params.tx_cq_moderation.pkts =
+		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
+	mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+	priv->params.num_tc                = 1;
+	priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
+
+	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
+			    sizeof(priv->params.toeplitz_hash_key));
+
+	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
+				      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
+
+	priv->params.lro_wqe_sz =
+		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ -
+		/* Extra room needed for build_skb */
+		MLX5_RX_HEADROOM -
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/* Initialize pflags */
+	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+			priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, priv->params.rx_cqe_compress_def);
+
+	mutex_init(&priv->state_lock);
+
+	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
+	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
+	if (is_zero_ether_addr(netdev->dev_addr) &&
+	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
+		eth_hw_addr_random(netdev);
+		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+	}
+}
+
+static const struct switchdev_ops mlx5e_switchdev_ops = {
+	.switchdev_port_attr_get	= mlx5e_attr_get,
+};
+
+static void mlx5e_build_nic_netdev(struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_core_dev *mdev = priv->mdev;
+	bool fcs_supported;
+	bool fcs_enabled;
+
+	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+		if (MLX5_CAP_GEN(mdev, qos))
+			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+#endif
+	} else {
+		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
+	}
+
+	netdev->watchdog_timeo    = 15 * HZ;
+
+	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;
+
+	netdev->vlan_features    |= NETIF_F_SG;
+	netdev->vlan_features    |= NETIF_F_IP_CSUM;
+	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
+	netdev->vlan_features    |= NETIF_F_GRO;
+	netdev->vlan_features    |= NETIF_F_TSO;
+	netdev->vlan_features    |= NETIF_F_TSO6;
+	netdev->vlan_features    |= NETIF_F_RXCSUM;
+	netdev->vlan_features    |= NETIF_F_RXHASH;
+
+	if (!!MLX5_CAP_ETH(mdev, lro_cap))
+		netdev->vlan_features    |= NETIF_F_LRO;
+
+	netdev->hw_features       = netdev->vlan_features;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
+	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (mlx5e_vxlan_allowed(mdev)) {
+		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
+		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
+		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
+		netdev->hw_enc_features |= NETIF_F_TSO;
+		netdev->hw_enc_features |= NETIF_F_TSO6;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+					   NETIF_F_GSO_PARTIAL;
+		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+
+	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+	if (fcs_supported)
+		netdev->hw_features |= NETIF_F_RXALL;
+
+	netdev->features          = netdev->hw_features;
+	if (!priv->params.lro_en)
+		netdev->features  &= ~NETIF_F_LRO;
+
+	if (fcs_enabled)
+		netdev->features  &= ~NETIF_F_RXALL;
+
+#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
+	if (FT_CAP(flow_modify_en) &&
+	    FT_CAP(modify_root) &&
+	    FT_CAP(identified_miss_table_mode) &&
+	    FT_CAP(flow_table_modify)) {
+		netdev->hw_features      |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+		netdev->hw_features	 |= NETIF_F_NTUPLE;
+#endif
+	}
+
+	netdev->features         |= NETIF_F_HIGHDMA;
+
+	netdev->priv_flags       |= IFF_UNICAST_FLT;
+
+	mlx5e_set_netdev_dev_addr(netdev);
+
+#ifdef CONFIG_NET_SWITCHDEV
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		netdev->switchdev_ops = &mlx5e_switchdev_ops;
+#endif
+}
+
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+
+	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+	if (err) {
+		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+		priv->q_counter = 0;
+	}
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+	if (!priv->q_counter)
+		return;
+
+	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
+			   struct net_device *netdev,
+			   const struct mlx5e_profile *profile,
+			   void *ppriv)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+	mlx5e_build_nic_netdev(netdev);
+	mlx5e_vxlan_init(priv);
+}
+
+static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
+	mlx5e_vxlan_cleanup(priv);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		mlx5_eswitch_unregister_vport_rep(esw, 0);
+
+	if (priv->xdp_prog)
+		bpf_prog_put(priv->xdp_prog);
+}
+
+static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_dev *mdev = priv->mdev;
+	int err;
+	int i;
+
+	err = mlx5e_create_indirect_rqts(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5e_create_direct_rqts(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+		goto err_destroy_indirect_rqts;
+	}
+
+	err = mlx5e_create_indirect_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+		goto err_destroy_direct_rqts;
+	}
+
+	err = mlx5e_create_direct_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+		goto err_destroy_indirect_tirs;
+	}
+
+	err = mlx5e_create_flow_steering(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
+		goto err_destroy_direct_tirs;
+	}
+
+	err = mlx5e_tc_init(priv);
+	if (err)
+		goto err_destroy_flow_steering;
+
+	return 0;
+
+err_destroy_flow_steering:
+	mlx5e_destroy_flow_steering(priv);
+err_destroy_direct_tirs:
+	mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+	mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+	for (i = 0; i < priv->profile->max_nch(mdev); i++)
+		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+err_destroy_indirect_rqts:
+	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+	return err;
+}
+
+static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
+{
+	int i;
+
+	mlx5e_tc_cleanup(priv);
+	mlx5e_destroy_flow_steering(priv);
+	mlx5e_destroy_direct_tirs(priv);
+	mlx5e_destroy_indirect_tirs(priv);
+	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
+{
+	int err;
+
+	err = mlx5e_create_tises(priv);
+	if (err) {
+		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+		return err;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+	mlx5e_dcbnl_initialize(priv);
+#endif
+	return 0;
+}
+
+static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+{
+	struct net_device *netdev = priv->netdev;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	struct mlx5_eswitch_rep rep;
+
+	mlx5_lag_add(mdev, netdev);
+
+	if (mlx5e_vxlan_allowed(mdev)) {
+		rtnl_lock();
+		udp_tunnel_get_rx_info(netdev);
+		rtnl_unlock();
+	}
+
+	mlx5e_enable_async_events(priv);
+	queue_work(priv->wq, &priv->set_rx_mode_work);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+		rep.load = mlx5e_nic_rep_load;
+		rep.unload = mlx5e_nic_rep_unload;
+		rep.vport = FDB_UPLINK_VPORT;
+		rep.netdev = netdev;
+		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
+	}
+}
+
+static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+{
+	queue_work(priv->wq, &priv->set_rx_mode_work);
+	mlx5e_disable_async_events(priv);
+	mlx5_lag_remove(priv->mdev);
+}
+
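+/* NIC netdev profile: a descriptive note — these hooks tie the generic
+ * netdev lifecycle (create/attach/detach/destroy) to NIC-specific priv
+ * init and RX/TX resource setup.
+ */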
+static const struct mlx5e_profile mlx5e_nic_profile = {
+	.init		   = mlx5e_nic_init,
+	.cleanup	   = mlx5e_nic_cleanup,
+	.init_rx	   = mlx5e_init_nic_rx,
+	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
+	.init_tx	   = mlx5e_init_nic_tx,
+	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
+	.enable		   = mlx5e_nic_enable,
+	.disable	   = mlx5e_nic_disable,
+	.update_stats	   = mlx5e_update_stats,
+	.max_nch	   = mlx5e_get_max_num_channels,
+	.max_tc		   = MLX5E_MAX_NUM_TC,
+};
+
+struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+				       const struct mlx5e_profile *profile,
+				       void *ppriv)
+{
+	int nch = profile->max_nch(mdev);
+	struct net_device *netdev;
+	struct mlx5e_priv *priv;
+
+	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+				    nch * profile->max_tc,
+				    nch);
+	if (!netdev) {
+		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+		return NULL;
+	}
+
+	profile->init(mdev, netdev, profile, ppriv);
+
+	netif_carrier_off(netdev);
+
+	priv = netdev_priv(netdev);
+
+	priv->wq = create_singlethread_workqueue("mlx5e");
+	if (!priv->wq)
+		goto err_cleanup_nic;
+
+	return netdev;
+
+err_cleanup_nic:
+	profile->cleanup(priv);
+	free_netdev(netdev);
+
+	return NULL;
+}
+
+int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+	const struct mlx5e_profile *profile;
+	struct mlx5e_priv *priv;
+	u16 max_mtu;
+	int err;
+
+	priv = netdev_priv(netdev);
+	profile = priv->profile;
+	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
+
+	err = profile->init_tx(priv);
+	if (err)
+		goto out;
+
+	err = mlx5e_open_drop_rq(priv);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_cleanup_tx;
+	}
+
+	err = profile->init_rx(priv);
+	if (err)
+		goto err_close_drop_rq;
+
+	mlx5e_create_q_counter(priv);
+
+	mlx5e_init_l2_addr(priv);
+
+	/* MTU range: 68 - hw-specific max */
+	netdev->min_mtu = ETH_MIN_MTU;
+	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
+	netdev->max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
+	mlx5e_set_dev_port_mtu(netdev);
+
+	if (profile->enable)
+		profile->enable(priv);
+
+	rtnl_lock();
+	if (netif_running(netdev))
+		mlx5e_open(netdev);
+	netif_device_attach(netdev);
+	rtnl_unlock();
+
+	return 0;
+
+err_close_drop_rq:
+	mlx5e_close_drop_rq(priv);
+
+err_cleanup_tx:
+	profile->cleanup_tx(priv);
+
+out:
+	return err;
+}
+
+static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	int vport;
+	u8 mac[ETH_ALEN];
+
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return;
+
+	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
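+	/* vport 0 is the uplink and is registered by the NIC profile in
+	 * mlx5e_nic_enable(); register a representor for each VF vport
+	 */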
+	for (vport = 1; vport < total_vfs; vport++) {
+		struct mlx5_eswitch_rep rep;
+
+		rep.load = mlx5e_vport_rep_load;
+		rep.unload = mlx5e_vport_rep_unload;
+		rep.vport = vport;
+		ether_addr_copy(rep.hw_id, mac);
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+	}
+}
+
+void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	const struct mlx5e_profile *profile = priv->profile;
+
+	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+	if (profile->disable)
+		profile->disable(priv);
+
+	flush_workqueue(priv->wq);
+
+	rtnl_lock();
+	if (netif_running(netdev))
+		mlx5e_close(netdev);
+	netif_device_detach(netdev);
+	rtnl_unlock();
+
+	mlx5e_destroy_q_counter(priv);
+	profile->cleanup_rx(priv);
+	mlx5e_close_drop_rq(priv);
+	profile->cleanup_tx(priv);
+	cancel_delayed_work_sync(&priv->update_stats_work);
+}
+
+/* The scope of mlx5e_attach and mlx5e_detach should be limited to creating
+ * and destroying hardware contexts and connecting them to the current
+ * netdev.
+ */
+static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+	struct mlx5e_priv *priv = vpriv;
+	struct net_device *netdev = priv->netdev;
+	int err;
+
+	if (netif_device_present(netdev))
+		return 0;
+
+	err = mlx5e_create_mdev_resources(mdev);
+	if (err)
+		return err;
+
+	err = mlx5e_attach_netdev(mdev, netdev);
+	if (err) {
+		mlx5e_destroy_mdev_resources(mdev);
+		return err;
+	}
+
+	return 0;
+}
+
+static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
+{
+	struct mlx5e_priv *priv = vpriv;
+	struct net_device *netdev = priv->netdev;
+
+	if (!netif_device_present(netdev))
+		return;
+
+	mlx5e_detach_netdev(mdev, netdev);
+	mlx5e_destroy_mdev_resources(mdev);
+}
+
+static void *mlx5e_add(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	struct net_device *netdev;
+	void *ppriv = NULL;
+	void *priv;
+	int vport;
+	int err;
+
+	err = mlx5e_check_required_hca_cap(mdev);
+	if (err)
+		return NULL;
+
+	mlx5e_register_vport_rep(mdev);
+
+	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+		ppriv = &esw->offloads.vport_reps[0];
+
+	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+	if (!netdev) {
+		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
+		goto err_unregister_reps;
+	}
+
+	priv = netdev_priv(netdev);
+
+	err = mlx5e_attach(mdev, priv);
+	if (err) {
+		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
+		goto err_destroy_netdev;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
+		goto err_detach;
+	}
+
+	return priv;
+
+err_detach:
+	mlx5e_detach(mdev, priv);
+
+err_destroy_netdev:
+	mlx5e_destroy_netdev(mdev, priv);
+
+err_unregister_reps:
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+	return NULL;
+}
+
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+{
+	const struct mlx5e_profile *profile = priv->profile;
+	struct net_device *netdev = priv->netdev;
+
+	destroy_workqueue(priv->wq);
+	if (profile->cleanup)
+		profile->cleanup(priv);
+	free_netdev(netdev);
+}
+
+static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+{
+	struct mlx5_eswitch *esw = mdev->priv.eswitch;
+	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+	struct mlx5e_priv *priv = vpriv;
+	int vport;
+
+	for (vport = 1; vport < total_vfs; vport++)
+		mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+	unregister_netdev(priv->netdev);
+	mlx5e_detach(mdev, vpriv);
+	mlx5e_destroy_netdev(mdev, priv);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+	struct mlx5e_priv *priv = vpriv;
+
+	return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+	.add       = mlx5e_add,
+	.remove    = mlx5e_remove,
+	.attach    = mlx5e_attach,
+	.detach    = mlx5e_detach,
+	.event     = mlx5e_async_event,
+	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
+	.get_dev   = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+	mlx5e_build_ptys2ethtool_map();
+	mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+	mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/rep.c b/drivers/net/ethernet/mellanox/mlx5/en/rep.c
new file mode 100644
index 0000000..72926bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/rep.c
@@ -0,0 +1,604 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <generated/utsrelease.h>
+#include "lib/fs.h"
+#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+
+#include "sriov/eswitch.h"
+#include "en/en.h"
+#include "en/tc.h"
+
+static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+
+static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+				  struct ethtool_drvinfo *drvinfo)
+{
+	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+		sizeof(drvinfo->driver));
+	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct counter_desc sw_rep_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+};
+
+#define NUM_VPORT_REP_COUNTERS	ARRAY_SIZE(sw_rep_stats_desc)
+
+static void mlx5e_rep_get_strings(struct net_device *dev,
+				  u32 stringset, uint8_t *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+			strcpy(data + (i * ETH_GSTRING_LEN),
+			       sw_rep_stats_desc[i].format);
+		break;
+	}
+}
+
+static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct rtnl_link_stats64 *vport_stats;
+	struct ifla_vf_stats vf_stats;
+	int err;
+
+	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
+	if (err) {
+		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
+		return;
+	}
+
+	vport_stats = &priv->stats.vf_vport;
+	/* flip tx/rx as we are reporting the counters for the switch vport */
+	vport_stats->rx_packets = vf_stats.tx_packets;
+	vport_stats->rx_bytes   = vf_stats.tx_bytes;
+	vport_stats->tx_packets = vf_stats.rx_packets;
+	vport_stats->tx_bytes   = vf_stats.rx_bytes;
+}
+
+static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
+{
+	struct mlx5e_sw_stats *s = &priv->stats.sw;
+	struct mlx5e_rq_stats *rq_stats;
+	struct mlx5e_sq_stats *sq_stats;
+	int i, j;
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < priv->params.num_channels; i++) {
+		rq_stats = &priv->channel[i]->rq.stats;
+
+		s->rx_packets	+= rq_stats->packets;
+		s->rx_bytes	+= rq_stats->bytes;
+
+		for (j = 0; j < priv->params.num_tc; j++) {
+			sq_stats = &priv->channel[i]->sq[j].stats;
+
+			s->tx_packets		+= sq_stats->packets;
+			s->tx_bytes		+= sq_stats->bytes;
+		}
+	}
+}
+
+static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
+{
+	mlx5e_rep_update_sw_counters(priv);
+	mlx5e_rep_update_hw_counters(priv);
+}
+
+static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
+					struct ethtool_stats *stats, u64 *data)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (!data)
+		return;
+
+	mutex_lock(&priv->state_lock);
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_rep_update_sw_counters(priv);
+	mutex_unlock(&priv->state_lock);
+
+	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+					       sw_rep_stats_desc, i);
+}
+
+static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return NUM_VPORT_REP_COUNTERS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
+	.get_link	   = ethtool_op_get_link,
+	.get_strings       = mlx5e_rep_get_strings,
+	.get_sset_count    = mlx5e_rep_get_sset_count,
+	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+};
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	switch (attr->id) {
+	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+		attr->u.ppid.id_len = ETH_ALEN;
+		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5e_channel *c;
+	int n, tc, err, num_sqs = 0;
+	u16 *sqs;
+
+	sqs = kcalloc(priv->params.num_channels * priv->params.num_tc,
+		      sizeof(u16), GFP_KERNEL);
+	if (!sqs)
+		return -ENOMEM;
+
+	for (n = 0; n < priv->params.num_channels; n++) {
+		c = priv->channel[n];
+		for (tc = 0; tc < c->num_tc; tc++)
+			sqs[num_sqs++] = c->sq[tc].sqn;
+	}
+
+	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+
+	kfree(sqs);
+	return err;
+}
+
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+	struct net_device *netdev = rep->netdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		return mlx5e_add_sqs_fwd_rules(priv);
+	return 0;
+}
+
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+
+	mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+			  struct mlx5_eswitch_rep *rep)
+{
+	struct net_device *netdev = rep->netdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+		mlx5e_remove_sqs_fwd_rules(priv);
+
+	/* clean (and re-init) existing uplink offloaded TC rules */
+	mlx5e_tc_cleanup(priv);
+	mlx5e_tc_init(priv);
+}
+
+static int mlx5e_rep_open(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	int err;
+
+	err = mlx5e_open(dev);
+	if (err)
+		return err;
+
+	err = mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP);
+	if (!err)
+		netif_carrier_on(dev);
+
+	return 0;
+}
+
+static int mlx5e_rep_close(struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+
+	return mlx5e_close(dev);
+}
+
+static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+					char *buf, size_t len)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	int ret;
+
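+	/* VF vports are numbered from 1, so report the zero-based VF index */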
+	ret = snprintf(buf, len, "%d", rep->vport - 1);
+	if (ret >= len)
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
+				  __be16 proto, struct tc_to_netdev *tc)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+		return -EOPNOTSUPP;
+
+	if (tc->egress_dev) {
+		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+		struct net_device *uplink_dev = mlx5_eswitch_get_uplink_netdev(esw);
+
+		return uplink_dev->netdev_ops->ndo_setup_tc(uplink_dev, handle,
+							    proto, tc);
+	}
+
+	switch (tc->type) {
+	case TC_SETUP_CLSFLOWER:
+		switch (tc->cls_flower->command) {
+		case TC_CLSFLOWER_REPLACE:
+			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
+		case TC_CLSFLOWER_DESTROY:
+			return mlx5e_delete_flower(priv, tc->cls_flower);
+		case TC_CLSFLOWER_STATS:
+			return mlx5e_stats_flower(priv, tc->cls_flower);
+		}
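+		/* fall through */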
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (rep && rep->vport == FDB_UPLINK_VPORT && esw->mode == SRIOV_OFFLOADS)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = (struct mlx5_eswitch_rep *)priv->ppriv;
+
+	if (rep && rep->vport != FDB_UPLINK_VPORT)
+		return true;
+
+	return false;
+}
+
+bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
+			return true;
+	}
+
+	return false;
+}
+
+static int
+mlx5e_get_sw_stats64(const struct net_device *dev,
+		     struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
+
+	stats->rx_packets = sstats->rx_packets;
+	stats->rx_bytes   = sstats->rx_bytes;
+	stats->tx_packets = sstats->tx_packets;
+	stats->tx_bytes   = sstats->tx_bytes;
+
+	stats->tx_dropped = sstats->tx_queue_dropped;
+
+	return 0;
+}
+
+int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
+			    void *sp)
+{
+	switch (attr_id) {
+	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
+		return mlx5e_get_sw_stats64(dev, sp);
+	}
+
+	return -EINVAL;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+
+	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+	return stats;
+}
+
+static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
+	.switchdev_port_attr_get	= mlx5e_attr_get,
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+	.ndo_open                = mlx5e_rep_open,
+	.ndo_stop                = mlx5e_rep_close,
+	.ndo_start_xmit          = mlx5e_xmit,
+	.ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
+	.ndo_setup_tc            = mlx5e_rep_ndo_setup_tc,
+	.ndo_get_stats64         = mlx5e_rep_get_stats,
+	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
+	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+	.ndo_has_offload_stats	 = mlx5e_has_offload_stats,
+	.ndo_get_offload_stats	 = mlx5e_get_offload_stats,
+};
+
+static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
+					struct net_device *netdev,
+					const struct mlx5e_profile *profile,
+					void *ppriv)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+
+	priv->params.log_sq_size           =
+		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+	priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+	priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+					    BIT(priv->params.log_rq_size));
+
+	priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+	mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+	priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
+	priv->params.num_tc                = 1;
+
+	priv->params.lro_wqe_sz            =
+		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+	priv->mdev                         = mdev;
+	priv->netdev                       = netdev;
+	priv->params.num_channels          = profile->max_nch(mdev);
+	priv->profile                      = profile;
+	priv->ppriv                        = ppriv;
+
+	mutex_init(&priv->state_lock);
+
+	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_build_rep_netdev(struct net_device *netdev)
+{
+	netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+
+	netdev->watchdog_timeo    = 15 * HZ;
+
+	netdev->ethtool_ops	  = &mlx5e_rep_ethtool_ops;
+
+#ifdef CONFIG_NET_SWITCHDEV
+	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
+#endif
+
+	netdev->features	 |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
+	netdev->hw_features      |= NETIF_F_HW_TC;
+
+	eth_hw_addr_random(netdev);
+}
+
+static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
+			   struct net_device *netdev,
+			   const struct mlx5e_profile *profile,
+			   void *ppriv)
+{
+	mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+	mlx5e_build_rep_netdev(netdev);
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5_core_dev *mdev = priv->mdev;
+	struct mlx5_flow_handle *flow_rule;
+	int err;
+	int i;
+
+	err = mlx5e_create_direct_rqts(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5e_create_direct_tirs(priv);
+	if (err) {
+		mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+		goto err_destroy_direct_rqts;
+	}
+
+	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+						      rep->vport,
+						      priv->direct_tir[0].tirn);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		goto err_destroy_direct_tirs;
+	}
+	rep->vport_rx_rule = flow_rule;
+
+	err = mlx5e_tc_init(priv);
+	if (err)
+		goto err_del_flow_rule;
+
+	return 0;
+
+err_del_flow_rule:
+	mlx5_del_flow_rules(rep->vport_rx_rule);
+err_destroy_direct_tirs:
+	mlx5e_destroy_direct_tirs(priv);
+err_destroy_direct_rqts:
+	for (i = 0; i < priv->params.num_channels; i++)
+		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+	return err;
+}
+
+static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	int i;
+
+	mlx5e_tc_cleanup(priv);
+	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5e_destroy_direct_tirs(priv);
+	for (i = 0; i < priv->params.num_channels; i++)
+		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+{
+	int err;
+
+	err = mlx5e_create_tises(priv);
+	if (err) {
+		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+		return err;
+	}
+	return 0;
+}
+
+static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
+{
+#define	MLX5E_PORT_REPRESENTOR_NCH 1
+	return MLX5E_PORT_REPRESENTOR_NCH;
+}
+
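+/* Representor profile: .cleanup, .enable and .disable are intentionally
+ * left unset; the netdev lifecycle helpers check these hooks for NULL.
+ */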
+static struct mlx5e_profile mlx5e_rep_profile = {
+	.init			= mlx5e_init_rep,
+	.init_rx		= mlx5e_init_rep_rx,
+	.cleanup_rx		= mlx5e_cleanup_rep_rx,
+	.init_tx		= mlx5e_init_rep_tx,
+	.cleanup_tx		= mlx5e_cleanup_nic_tx,
+	.update_stats           = mlx5e_rep_update_stats,
+	.max_nch		= mlx5e_get_rep_max_num_channels,
+	.max_tc			= 1,
+};
+
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+			 struct mlx5_eswitch_rep *rep)
+{
+	struct net_device *netdev;
+	int err;
+
+	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+	if (!netdev) {
+		pr_warn("Failed to create representor netdev for vport %d\n",
+			rep->vport);
+		return -EINVAL;
+	}
+
+	rep->netdev = netdev;
+
+	err = mlx5e_attach_netdev(esw->dev, netdev);
+	if (err) {
+		pr_warn("Failed to attach representor netdev for vport %d\n",
+			rep->vport);
+		goto err_destroy_netdev;
+	}
+
+	err = register_netdev(netdev);
+	if (err) {
+		pr_warn("Failed to register representor netdev for vport %d\n",
+			rep->vport);
+		goto err_detach_netdev;
+	}
+
+	return 0;
+
+err_detach_netdev:
+	mlx5e_detach_netdev(esw->dev, netdev);
+
+err_destroy_netdev:
+	mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
+
+	return err;
+}
+
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+			    struct mlx5_eswitch_rep *rep)
+{
+	struct net_device *netdev = rep->netdev;
+
+	unregister_netdev(netdev);
+	mlx5e_detach_netdev(esw->dev, netdev);
+	mlx5e_destroy_netdev(esw->dev, netdev_priv(netdev));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/rx.c b/drivers/net/ethernet/mellanox/mlx5/en/rx.c
new file mode 100644
index 0000000..edfbdb9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/rx.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <net/busy_poll.h>
+#include "en/en.h"
+#include "en/tc.h"
+#include "sriov/eswitch.h"
+
+static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
+{
+	return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+				       void *data)
+{
+	u32 ci = cqcc & cq->wq.sz_m1;
+
+	memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
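+/* A compressed CQE session starts with a "title" CQE that carries the
+ * fields shared by the whole burst, followed by arrays of mini CQEs
+ * holding only the per-packet byte counts and checksums.
+ */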
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+					 struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+	cq->decmprs_left        = be32_to_cpu(cq->title.byte_cnt);
+	cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+	rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+	cq->mini_arr_idx = 0;
+}
+
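+/* Restore the ownership bit of the n CQE slots consumed by decompression,
+ * flipping it for slots that wrap past the end of the CQ ring.
+ */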
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+	u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+	u32 wq_sz = 1 << cq->wq.log_sz;
+	u32 ci = cqcc & cq->wq.sz_m1;
+	u32 ci_top = min_t(u32, wq_sz, ci + n);
+
+	for (; ci < ci_top; ci++, n--) {
+		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+		cqe->op_own = op_own;
+	}
+
+	if (unlikely(ci == wq_sz)) {
+		op_own = !op_own;
+		for (ci = 0; ci < n; ci++) {
+			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+			cqe->op_own = op_own;
+		}
+	}
+}
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+					struct mlx5e_cq *cq, u32 cqcc)
+{
+	u16 wqe_cnt_step;
+
+	cq->title.byte_cnt     = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+	cq->title.check_sum    = cq->mini_arr[cq->mini_arr_idx].checksum;
+	cq->title.op_own      &= 0xf0;
+	cq->title.op_own      |= 0x01 & (cqcc >> cq->wq.log_sz);
+	cq->title.wqe_counter  = cpu_to_be16(cq->decmprs_wqe_counter);
+
+	wqe_cnt_step =
+		rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+		mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+	cq->decmprs_wqe_counter =
+		(cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+						struct mlx5e_cq *cq, u32 cqcc)
+{
+	mlx5e_decompress_cqe(rq, cq, cqcc);
+	cq->title.rss_hash_type   = 0;
+	cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+					     struct mlx5e_cq *cq,
+					     int update_owner_only,
+					     int budget_rem)
+{
+	u32 cqcc = cq->wq.cc + update_owner_only;
+	u32 cqe_count;
+	u32 i;
+
+	cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+	for (i = update_owner_only; i < cqe_count;
+	     i++, cq->mini_arr_idx++, cqcc++) {
+		if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+			mlx5e_read_mini_arr_slot(cq, cqcc);
+
+		mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+		rq->handle_rx_cqe(rq, &cq->title);
+	}
+	mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+	cq->wq.cc = cqcc;
+	cq->decmprs_left -= cqe_count;
+	rq->stats.cqe_compress_pkts += cqe_count;
+
+	return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+					      struct mlx5e_cq *cq,
+					      int budget_rem)
+{
+	mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+	mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+	mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+	rq->handle_rx_cqe(rq, &cq->title);
+	cq->mini_arr_idx++;
+
+	return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+	bool was_opened;
+
+	if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+		return;
+
+	mutex_lock(&priv->state_lock);
+
+	if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
+		goto unlock;
+
+	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+	if (was_opened)
+		mlx5e_close_locked(priv->netdev);
+
+	MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, val);
+
+	if (was_opened)
+		mlx5e_open_locked(priv->netdev);
+
+unlock:
+	mutex_unlock(&priv->state_lock);
+}
+
+#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
+
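+/* Per-RQ page cache: a small ring of DMA-mapped pages that lets RX buffers
+ * be recycled without a fresh dev_alloc_pages()/dma_map_page() round trip.
+ */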
+static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
+				      struct mlx5e_dma_info *dma_info)
+{
+	struct mlx5e_page_cache *cache = &rq->page_cache;
+	u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1);
+
+	if (tail_next == cache->head) {
+		rq->stats.cache_full++;
+		return false;
+	}
+
+	cache->page_cache[cache->tail] = *dma_info;
+	cache->tail = tail_next;
+	return true;
+}
+
+static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
+				      struct mlx5e_dma_info *dma_info)
+{
+	struct mlx5e_page_cache *cache = &rq->page_cache;
+
+	if (unlikely(cache->head == cache->tail)) {
+		rq->stats.cache_empty++;
+		return false;
+	}
+
+	if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+		rq->stats.cache_busy++;
+		return false;
+	}
+
+	*dma_info = cache->page_cache[cache->head];
+	cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1);
+	rq->stats.cache_reuse++;
+
+	dma_sync_single_for_device(rq->pdev, dma_info->addr,
+				   RQ_PAGE_SIZE(rq),
+				   DMA_FROM_DEVICE);
+	return true;
+}
+
+static inline int mlx5e_page_alloc_mapped(struct mlx5e_rq *rq,
+					  struct mlx5e_dma_info *dma_info)
+{
+	struct page *page;
+
+	if (mlx5e_rx_cache_get(rq, dma_info))
+		return 0;
+
+	page = dev_alloc_pages(rq->buff.page_order);
+	if (unlikely(!page))
+		return -ENOMEM;
+
+	dma_info->page = page;
+	dma_info->addr = dma_map_page(rq->pdev, page, 0,
+				      RQ_PAGE_SIZE(rq), rq->buff.map_dir);
+	if (unlikely(dma_mapping_error(rq->pdev, dma_info->addr))) {
+		put_page(page);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+void mlx5e_page_release(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info,
+			bool recycle)
+{
+	if (likely(recycle) && mlx5e_rx_cache_put(rq, dma_info))
+		return;
+
+	dma_unmap_page(rq->pdev, dma_info->addr, RQ_PAGE_SIZE(rq),
+		       rq->buff.map_dir);
+	put_page(dma_info->page);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+	struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+	if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
+		return -ENOMEM;
+
+	wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+	return 0;
+}
+
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+	struct mlx5e_dma_info *di = &rq->dma_info[ix];
+
+	mlx5e_page_release(rq, di, true);
+}
+
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+	return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
+					    struct sk_buff *skb,
+					    struct mlx5e_mpw_info *wi,
+					    u32 page_idx, u32 frag_offset,
+					    u32 len)
+{
+	unsigned int truesize =	ALIGN(len, rq->mpwqe_stride_sz);
+
+	dma_sync_single_for_cpu(rq->pdev,
+				wi->umr.dma_info[page_idx].addr + frag_offset,
+				len, DMA_FROM_DEVICE);
+	wi->skbs_frags[page_idx]++;
+	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+			wi->umr.dma_info[page_idx].page, frag_offset,
+			len, truesize);
+}
+
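+/* Copy the packet headers into the SKB linear part; with MPWQE buffers the
+ * header may straddle two pages, so a second copy handles the spill-over.
+ */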
+static inline void
+mlx5e_copy_skb_header_mpwqe(struct device *pdev,
+			    struct sk_buff *skb,
+			    struct mlx5e_mpw_info *wi,
+			    u32 page_idx, u32 offset,
+			    u32 headlen)
+{
+	u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+	struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+	unsigned int len;
+
+	/* Aligning len to sizeof(long) optimizes memcpy performance */
+	len = ALIGN(headlen_pg, sizeof(long));
+	dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+				DMA_FROM_DEVICE);
+	skb_copy_to_linear_data_offset(skb, 0,
+				       page_address(dma_info->page) + offset,
+				       len);
+	if (unlikely(offset + headlen > PAGE_SIZE)) {
+		dma_info++;
+		headlen_pg = len;
+		len = ALIGN(headlen - headlen_pg, sizeof(long));
+		dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+					DMA_FROM_DEVICE);
+		skb_copy_to_linear_data_offset(skb, headlen_pg,
+					       page_address(dma_info->page),
+					       len);
+	}
+}
+
+static inline void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+	struct mlx5e_sq *sq = &rq->channel->icosq;
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5e_umr_wqe *wqe;
+	u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+	u16 pi;
+
+	/* fill the SQ edge with NOPs to avoid WQE wrap-around */
+	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+		sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
+		sq->db.ico_wqe[pi].num_wqebbs = 1;
+		mlx5e_send_nop(sq, false);
+	}
+
+	wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+	memcpy(wqe, &wi->umr.wqe, sizeof(*wqe));
+	wqe->ctrl.opmod_idx_opcode =
+		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+			    MLX5_OPCODE_UMR);
+
+	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
+	sq->db.ico_wqe[pi].num_wqebbs = num_wqebbs;
+	sq->pc += num_wqebbs;
+	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
+				    struct mlx5e_rx_wqe *wqe,
+				    u16 ix)
+{
+	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+	u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		err = mlx5e_page_alloc_mapped(rq, dma_info);
+		if (unlikely(err))
+			goto err_unmap;
+		wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
+		page_ref_add(dma_info->page, pg_strides);
+		wi->skbs_frags[i] = 0;
+	}
+
+	wi->consumed_strides = 0;
+	wqe->data.addr = cpu_to_be64(dma_offset);
+
+	return 0;
+
+err_unmap:
+	while (--i >= 0) {
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		page_ref_sub(dma_info->page, pg_strides);
+		mlx5e_page_release(rq, dma_info, true);
+	}
+
+	return err;
+}
+
+void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
+{
+	int pg_strides = mlx5e_mpwqe_strides_per_page(rq);
+	int i;
+
+	for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+		struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
+
+		page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+		mlx5e_page_release(rq, dma_info, true);
+	}
+}
+
+void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+	struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+	clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state))) {
+		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
+		return;
+	}
+
+	mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
+}
+
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+	int err;
+
+	err = mlx5e_alloc_rx_umr_mpwqe(rq, wqe, ix);
+	if (unlikely(err))
+		return err;
+	set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+	mlx5e_post_umr_wqe(rq, ix);
+	return -EBUSY;
+}
+
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
+
+	mlx5e_free_rx_mpwqe(rq, wi);
+}
+
+#define RQ_CANNOT_POST(rq) \
+	(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state) || \
+	 test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+	struct mlx5_wq_ll *wq = &rq->wq;
+
+	if (unlikely(RQ_CANNOT_POST(rq)))
+		return false;
+
+	while (!mlx5_wq_ll_is_full(wq)) {
+		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+		int err;
+
+		err = rq->alloc_wqe(rq, wqe, wq->head);
+		if (err == -EBUSY)
+			return true;
+		if (unlikely(err)) {
+			rq->stats.buff_alloc_err++;
+			break;
+		}
+
+		mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+	}
+
+	/* ensure wqes are visible to device before updating doorbell record */
+	dma_wmb();
+
+	mlx5_wq_ll_update_db_record(wq);
+
+	return !mlx5_wq_ll_is_full(wq);
+}
+
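+/* For an LRO-aggregated SKB, rewrite the IP and TCP headers (total length,
+ * IP checksum, TTL/hop limit, ACK/PSH flags and window) so they describe
+ * the merged super-packet.
+ */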
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+				 u32 cqe_bcnt)
+{
+	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
+	struct iphdr	*ipv4;
+	struct ipv6hdr	*ipv6;
+	struct tcphdr	*tcp;
+	int network_depth = 0;
+	__be16 proto;
+	u16 tot_len;
+
+	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+	int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA  == l4_hdr_type) ||
+		       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+	skb->mac_len = ETH_HLEN;
+	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+	ipv4 = (struct iphdr *)(skb->data + network_depth);
+	ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+	tot_len = cqe_bcnt - network_depth;
+
+	if (proto == htons(ETH_P_IP)) {
+		tcp = (struct tcphdr *)(skb->data + network_depth +
+					sizeof(struct iphdr));
+		ipv6 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+	} else {
+		tcp = (struct tcphdr *)(skb->data + network_depth +
+					sizeof(struct ipv6hdr));
+		ipv4 = NULL;
+		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+	}
+
+	if (get_cqe_lro_tcppsh(cqe))
+		tcp->psh                = 1;
+
+	if (tcp_ack) {
+		tcp->ack                = 1;
+		tcp->ack_seq            = cqe->lro_ack_seq_num;
+		tcp->window             = cqe->lro_tcp_win;
+	}
+
+	if (ipv4) {
+		ipv4->ttl               = cqe->lro_min_ttl;
+		ipv4->tot_len           = cpu_to_be16(tot_len);
+		ipv4->check             = 0;
+		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
+						       ipv4->ihl);
+	} else {
+		ipv6->hop_limit         = cqe->lro_min_ttl;
+		ipv6->payload_len       = cpu_to_be16(tot_len -
+						      sizeof(struct ipv6hdr));
+	}
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+				      struct sk_buff *skb)
+{
+	u8 cht = cqe->rss_hash_type;
+	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+					    PKT_HASH_TYPE_NONE;
+	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+{
+	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+
+	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_handle_csum(struct net_device *netdev,
+				     struct mlx5_cqe64 *cqe,
+				     struct mlx5e_rq *rq,
+				     struct sk_buff *skb,
+				     bool   lro)
+{
+	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+		goto csum_none;
+
+	if (lro) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		return;
+	}
+
+	if (is_first_ethertype_ip(skb)) {
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+		rq->stats.csum_complete++;
+		return;
+	}
+
+	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+		   (cqe->hds_ip_ext & CQE_L4_OK))) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		if (cqe_is_tunneled(cqe)) {
+			skb->csum_level = 1;
+			skb->encapsulation = 1;
+			rq->stats.csum_unnecessary_inner++;
+		}
+		return;
+	}
+csum_none:
+	skb->ip_summed = CHECKSUM_NONE;
+	rq->stats.csum_none++;
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+				      u32 cqe_bcnt,
+				      struct mlx5e_rq *rq,
+				      struct sk_buff *skb)
+{
+	struct net_device *netdev = rq->netdev;
+	struct mlx5e_tstamp *tstamp = rq->tstamp;
+	int lro_num_seg;
+
+	lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+	if (lro_num_seg > 1) {
+		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+		rq->stats.lro_packets++;
+		rq->stats.lro_bytes += cqe_bcnt;
+	}
+
+	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+		mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+
+	skb_record_rx_queue(skb, rq->ix);
+
+	if (likely(netdev->features & NETIF_F_RXHASH))
+		mlx5e_skb_set_hash(cqe, skb);
+
+	if (cqe_has_vlan(cqe))
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       be16_to_cpu(cqe->vlan_info));
+
+	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+	skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+					 struct mlx5_cqe64 *cqe,
+					 u32 cqe_bcnt,
+					 struct sk_buff *skb)
+{
+	rq->stats.packets++;
+	rq->stats.bytes += cqe_bcnt;
+	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+}
+
+static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
+{
+	struct mlx5_wq_cyc *wq = &sq->wq;
+	struct mlx5e_tx_wqe *wqe;
+	u16 pi = (sq->pc - MLX5E_XDP_TX_WQEBBS) & wq->sz_m1; /* last pi */
+
+	wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+	mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+					struct mlx5e_dma_info *di,
+					unsigned int data_offset,
+					int len)
+{
+	struct mlx5e_sq          *sq   = &rq->channel->xdp_sq;
+	struct mlx5_wq_cyc       *wq   = &sq->wq;
+	u16                      pi    = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+	struct mlx5e_sq_wqe_info *wi   = &sq->db.xdp.wqe_info[pi];
+
+	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+	struct mlx5_wqe_data_seg *dseg;
+
+	dma_addr_t dma_addr  = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
+	unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
+	void *data           = page_address(di->page) + data_offset;
+
+	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
+		if (sq->db.xdp.doorbell) {
+			/* SQ is full, ring doorbell */
+			mlx5e_xmit_xdp_doorbell(sq);
+			sq->db.xdp.doorbell = false;
+		}
+		rq->stats.xdp_tx_full++;
+		mlx5e_page_release(rq, di, true);
+		return;
+	}
+
+	dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
+				   PCI_DMA_TODEVICE);
+
+	memset(wqe, 0, sizeof(*wqe));
+
+	/* copy the inline part */
+	memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+	eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+
+	dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
+
+	/* write the dma part */
+	dseg->addr       = cpu_to_be64(dma_addr);
+	dseg->byte_count = cpu_to_be32(dma_len);
+	dseg->lkey       = sq->mkey_be;
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
+	cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | MLX5E_XDP_TX_DS_COUNT);
+
+	sq->db.xdp.di[pi] = *di;
+	wi->opcode     = MLX5_OPCODE_SEND;
+	wi->num_wqebbs = MLX5E_XDP_TX_WQEBBS;
+	sq->pc += MLX5E_XDP_TX_WQEBBS;
+
+	sq->db.xdp.doorbell = true;
+	rq->stats.xdp_tx++;
+}
+
+/* returns true if the packet was consumed by XDP */
+static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+				    const struct bpf_prog *prog,
+				    struct mlx5e_dma_info *di,
+				    void *data, u16 len)
+{
+	struct xdp_buff xdp;
+	u32 act;
+
+	if (!prog)
+		return false;
+
+	xdp.data = data;
+	xdp.data_end = xdp.data + len;
+	act = bpf_prog_run_xdp(prog, &xdp);
+	switch (act) {
+	case XDP_PASS:
+		return false;
+	case XDP_TX:
+		mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+		return true;
+	default:
+		bpf_warn_invalid_xdp_action(act);
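+		/* fall through */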
+	case XDP_ABORTED:
+	case XDP_DROP:
+		rq->stats.xdp_drop++;
+		mlx5e_page_release(rq, di, true);
+		return true;
+	}
+}
+
+static inline
+struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+			     u16 wqe_counter, u32 cqe_bcnt)
+{
+	struct mlx5e_dma_info *di;
+	struct sk_buff *skb;
+	void *va, *data;
+	bool consumed;
+
+	di             = &rq->dma_info[wqe_counter];
+	va             = page_address(di->page);
+	data           = va + MLX5_RX_HEADROOM;
+
+	dma_sync_single_range_for_cpu(rq->pdev,
+				      di->addr,
+				      MLX5_RX_HEADROOM,
+				      rq->buff.wqe_sz,
+				      DMA_FROM_DEVICE);
+	prefetch(data);
+
+	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+		rq->stats.wqe_err++;
+		mlx5e_page_release(rq, di, true);
+		return NULL;
+	}
+
+	rcu_read_lock();
+	consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
+				    cqe_bcnt);
+	rcu_read_unlock();
+	if (consumed)
+		return NULL; /* page/packet was consumed by XDP */
+
+	skb = build_skb(va, RQ_PAGE_SIZE(rq));
+	if (unlikely(!skb)) {
+		rq->stats.buff_alloc_err++;
+		mlx5e_page_release(rq, di, true);
+		return NULL;
+	}
+
+	/* queue up for recycling: take an extra page reference and hand the
+	 * RQ's reference back to the page cache; the page is only reused once
+	 * the SKB drops its reference
+	 */
+	page_ref_inc(di->page);
+	mlx5e_page_release(rq, di, true);
+
+	skb_reserve(skb, MLX5_RX_HEADROOM);
+	skb_put(skb, cqe_bcnt);
+
+	return skb;
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct mlx5e_rx_wqe *wqe;
+	__be16 wqe_counter_be;
+	struct sk_buff *skb;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
+
+	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	if (!skb)
+		goto wq_ll_pop;
+
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+		       &wqe->next.next_wqe_index);
+}
+
+void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	struct net_device *netdev = rq->netdev;
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	struct mlx5e_rx_wqe *wqe;
+	struct sk_buff *skb;
+	__be16 wqe_counter_be;
+	u16 wqe_counter;
+	u32 cqe_bcnt;
+
+	wqe_counter_be = cqe->wqe_counter;
+	wqe_counter    = be16_to_cpu(wqe_counter_be);
+	wqe            = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+	cqe_bcnt       = be32_to_cpu(cqe->byte_cnt);
+
+	skb = skb_from_cqe(rq, cqe, wqe_counter, cqe_bcnt);
+	if (!skb)
+		goto wq_ll_pop;
+
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+	if (rep->vlan && skb_vlan_tag_present(skb))
+		skb_vlan_pop(skb);
+
+	napi_gro_receive(rq->cq.napi, skb);
+
+wq_ll_pop:
+	mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+		       &wqe->next.next_wqe_index);
+}
+
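+/* Build the SKB for an MPWQE completion: linearize up to
+ * MLX5_MPWRQ_SMALL_PACKET_THRESHOLD bytes of headers and attach the rest
+ * of the payload as page fragments.
+ */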
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+					   struct mlx5_cqe64 *cqe,
+					   struct mlx5e_mpw_info *wi,
+					   u32 cqe_bcnt,
+					   struct sk_buff *skb)
+{
+	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
+	u32 wqe_offset     = stride_ix * rq->mpwqe_stride_sz;
+	u32 head_offset    = wqe_offset & (PAGE_SIZE - 1);
+	u32 page_idx       = wqe_offset >> PAGE_SHIFT;
+	u32 head_page_idx  = page_idx;
+	u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+	u32 frag_offset    = head_offset + headlen;
+	u16 byte_cnt       = cqe_bcnt - headlen;
+
+	if (unlikely(frag_offset >= PAGE_SIZE)) {
+		page_idx++;
+		frag_offset -= PAGE_SIZE;
+	}
+
+	while (byte_cnt) {
+		u32 pg_consumed_bytes =
+			min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+		mlx5e_add_skb_frag_mpwqe(rq, skb, wi, page_idx, frag_offset,
+					 pg_consumed_bytes);
+		byte_cnt -= pg_consumed_bytes;
+		frag_offset = 0;
+		page_idx++;
+	}
+	/* copy header */
+	mlx5e_copy_skb_header_mpwqe(rq->pdev, skb, wi, head_page_idx,
+				    head_offset, headlen);
+	/* skb linear part was allocated with headlen and aligned to long */
+	skb->tail += headlen;
+	skb->len  += headlen;
+}
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
+	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
+	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[wqe_id];
+	struct mlx5e_rx_wqe  *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+	struct sk_buff *skb;
+	u16 cqe_bcnt;
+
+	wi->consumed_strides += cstrides;
+
+	if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+		rq->stats.wqe_err++;
+		goto mpwrq_cqe_out;
+	}
+
+	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+		rq->stats.mpwqe_filler++;
+		goto mpwrq_cqe_out;
+	}
+
+	skb = napi_alloc_skb(rq->cq.napi,
+			     ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+				   sizeof(long)));
+	if (unlikely(!skb)) {
+		rq->stats.buff_alloc_err++;
+		goto mpwrq_cqe_out;
+	}
+
+	prefetch(skb->data);
+	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+	mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+	napi_gro_receive(rq->cq.napi, skb);
+
+mpwrq_cqe_out:
+	if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+		return;
+
+	mlx5e_free_rx_mpwqe(rq, wi);
+	mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+
+int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
+	struct mlx5e_sq *xdp_sq = &rq->channel->xdp_sq;
+	int work_done = 0;
+
+	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
+		return 0;
+
+	if (cq->decmprs_left)
+		work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
+
+	for (; work_done < budget; work_done++) {
+		struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
+
+		if (!cqe)
+			break;
+
+		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+			work_done +=
+				mlx5e_decompress_cqes_start(rq, cq,
+							    budget - work_done);
+			continue;
+		}
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		rq->handle_rx_cqe(rq, cqe);
+	}
+
+	if (xdp_sq->db.xdp.doorbell) {
+		mlx5e_xmit_xdp_doorbell(xdp_sq);
+		xdp_sq->db.xdp.doorbell = false;
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	return work_done;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/rx_am.c b/drivers/net/ethernet/mellanox/mlx5/en/rx_am.c
new file mode 100644
index 0000000..caaf7b2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/rx_am.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en/en.h"
+
+/* Adaptive moderation profiles */
+#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
+#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
+#define MLX5E_PARAMS_AM_NUM_PROFILES 5
+
+/* Each profile array must have MLX5E_PARAMS_AM_NUM_PROFILES entries; an
+ * entry is a {cq period (usec), cq max count (pkts)} moderation pair.
+ */
+#define MLX5_AM_EQE_PROFILES { \
+	{1,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{8,   MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{64,  MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+	{256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define MLX5_AM_CQE_PROFILES { \
+	{2,  256},             \
+	{8,  128},             \
+	{16, 64},              \
+	{32, 64},              \
+	{64, 64}               \
+}
+
+static const struct mlx5e_cq_moder
+profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
+	MLX5_AM_EQE_PROFILES,
+	MLX5_AM_CQE_PROFILES,
+};
+
+static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
+{
+	return profile[cq_period_mode][ix];
+}
+
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
+{
+	int default_profile_ix;
+
+	if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
+	else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
+		default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
+
+	return profile[rx_cq_period_mode][default_profile_ix];
+}
+
+/* Adaptive moderation logic */
+enum {
+	MLX5E_AM_START_MEASURE,
+	MLX5E_AM_MEASURE_IN_PROGRESS,
+	MLX5E_AM_APPLY_NEW_PROFILE,
+};
+
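+/* Tuning direction: "right" steps toward higher profile indices (more
+ * aggressive moderation), "left" toward lower ones; "parking" pauses the
+ * search.
+ */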
+enum {
+	MLX5E_AM_PARKING_ON_TOP,
+	MLX5E_AM_PARKING_TIRED,
+	MLX5E_AM_GOING_RIGHT,
+	MLX5E_AM_GOING_LEFT,
+};
+
+enum {
+	MLX5E_AM_STATS_WORSE,
+	MLX5E_AM_STATS_SAME,
+	MLX5E_AM_STATS_BETTER,
+};
+
+enum {
+	MLX5E_AM_STEPPED,
+	MLX5E_AM_TOO_TIRED,
+	MLX5E_AM_ON_EDGE,
+};
+
+static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
+{
+	switch (am->tune_state) {
+	case MLX5E_AM_PARKING_ON_TOP:
+	case MLX5E_AM_PARKING_TIRED:
+		WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
+		return true;
+	case MLX5E_AM_GOING_RIGHT:
+		return (am->steps_left > 1) && (am->steps_right == 1);
+	default: /* MLX5E_AM_GOING_LEFT */
+		return (am->steps_right > 1) && (am->steps_left == 1);
+	}
+}
+
+static void mlx5e_am_turn(struct mlx5e_rx_am *am)
+{
+	switch (am->tune_state) {
+	case MLX5E_AM_PARKING_ON_TOP:
+	case MLX5E_AM_PARKING_TIRED:
+		WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
+		break;
+	case MLX5E_AM_GOING_RIGHT:
+		am->tune_state = MLX5E_AM_GOING_LEFT;
+		am->steps_left = 0;
+		break;
+	case MLX5E_AM_GOING_LEFT:
+		am->tune_state = MLX5E_AM_GOING_RIGHT;
+		am->steps_right = 0;
+		break;
+	}
+}
+
+static int mlx5e_am_step(struct mlx5e_rx_am *am)
+{
+	if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
+		return MLX5E_AM_TOO_TIRED;
+
+	switch (am->tune_state) {
+	case MLX5E_AM_PARKING_ON_TOP:
+	case MLX5E_AM_PARKING_TIRED:
+		WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
+		break;
+	case MLX5E_AM_GOING_RIGHT:
+		if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
+			return MLX5E_AM_ON_EDGE;
+		am->profile_ix++;
+		am->steps_right++;
+		break;
+	case MLX5E_AM_GOING_LEFT:
+		if (am->profile_ix == 0)
+			return MLX5E_AM_ON_EDGE;
+		am->profile_ix--;
+		am->steps_left++;
+		break;
+	}
+
+	am->tired++;
+	return MLX5E_AM_STEPPED;
+}
+
+static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
+{
+	am->steps_right  = 0;
+	am->steps_left   = 0;
+	am->tired        = 0;
+	am->tune_state   = MLX5E_AM_PARKING_ON_TOP;
+}
+
+static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
+{
+	am->steps_right  = 0;
+	am->steps_left   = 0;
+	am->tune_state   = MLX5E_AM_PARKING_TIRED;
+}
+
+static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
+{
+	am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
+					  MLX5E_AM_GOING_RIGHT;
+	mlx5e_am_step(am);
+}
+
+static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
+				  struct mlx5e_rx_am_stats *prev)
+{
+	int diff;
+
+	if (!prev->ppms)
+		return curr->ppms ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_SAME;
+
+	diff = curr->ppms - prev->ppms;
+	if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
+		return (diff > 0) ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_WORSE;
+
+	if (!prev->epms)
+		return curr->epms ? MLX5E_AM_STATS_WORSE :
+				    MLX5E_AM_STATS_SAME;
+
+	diff = curr->epms - prev->epms;
+	if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
+		return (diff < 0) ? MLX5E_AM_STATS_BETTER :
+				    MLX5E_AM_STATS_WORSE;
+
+	return MLX5E_AM_STATS_SAME;
+}
+
+static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
+			      struct mlx5e_rx_am *am)
+{
+	int prev_state = am->tune_state;
+	int prev_ix = am->profile_ix;
+	int stats_res;
+	int step_res;
+
+	switch (am->tune_state) {
+	case MLX5E_AM_PARKING_ON_TOP:
+		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+		if (stats_res != MLX5E_AM_STATS_SAME)
+			mlx5e_am_exit_parking(am);
+		break;
+
+	case MLX5E_AM_PARKING_TIRED:
+		am->tired--;
+		if (!am->tired)
+			mlx5e_am_exit_parking(am);
+		break;
+
+	case MLX5E_AM_GOING_RIGHT:
+	case MLX5E_AM_GOING_LEFT:
+		stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+		if (stats_res != MLX5E_AM_STATS_BETTER)
+			mlx5e_am_turn(am);
+
+		if (mlx5e_am_on_top(am)) {
+			mlx5e_am_park_on_top(am);
+			break;
+		}
+
+		step_res = mlx5e_am_step(am);
+		switch (step_res) {
+		case MLX5E_AM_ON_EDGE:
+			mlx5e_am_park_on_top(am);
+			break;
+		case MLX5E_AM_TOO_TIRED:
+			mlx5e_am_park_tired(am);
+			break;
+		}
+
+		break;
+	}
+
+	if ((prev_state     != MLX5E_AM_PARKING_ON_TOP) ||
+	    (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
+		am->prev_stats = *curr_stats;
+
+	return am->profile_ix != prev_ix;
+}
+
+static void mlx5e_am_sample(struct mlx5e_rq *rq,
+			    struct mlx5e_rx_am_sample *s)
+{
+	s->time	     = ktime_get();
+	s->pkt_ctr   = rq->stats.packets;
+	s->event_ctr = rq->cq.event_ctr;
+}
+
+#define MLX5E_AM_NEVENTS 64
+
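+/* Statistics are computed over a window of MLX5E_AM_NEVENTS CQ events
+ * rather than a fixed time, so the sample length adapts to the load.
+ */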
+static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
+				struct mlx5e_rx_am_sample *end,
+				struct mlx5e_rx_am_stats *curr_stats)
+{
+	/* u32 holds up to 71 minutes, should be enough */
+	u32 delta_us = ktime_us_delta(end->time, start->time);
+	unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+
+	if (!delta_us) {
+		WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+		return;
+	}
+
+	curr_stats->ppms =            (npkts * USEC_PER_MSEC) / delta_us;
+	curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+}
+
+void mlx5e_rx_am_work(struct work_struct *work)
+{
+	struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
+					      work);
+	struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
+	struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
+
+	mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+				       cur_profile.usec, cur_profile.pkts);
+
+	am->state = MLX5E_AM_START_MEASURE;
+}
+
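+/* Driven from the RX datapath: START_MEASURE takes a baseline sample,
+ * MEASURE_IN_PROGRESS waits for MLX5E_AM_NEVENTS events and then runs the
+ * decision, and APPLY_NEW_PROFILE is a no-op until the work item has
+ * reprogrammed the CQ moderation.
+ */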
+void mlx5e_rx_am(struct mlx5e_rq *rq)
+{
+	struct mlx5e_rx_am *am = &rq->am;
+	struct mlx5e_rx_am_sample end_sample;
+	struct mlx5e_rx_am_stats curr_stats;
+	u16 nevents;
+
+	switch (am->state) {
+	case MLX5E_AM_MEASURE_IN_PROGRESS:
+		nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+		if (nevents < MLX5E_AM_NEVENTS)
+			break;
+		mlx5e_am_sample(rq, &end_sample);
+		mlx5e_am_calc_stats(&am->start_sample, &end_sample,
+				    &curr_stats);
+		if (mlx5e_am_decision(&curr_stats, am)) {
+			am->state = MLX5E_AM_APPLY_NEW_PROFILE;
+			schedule_work(&am->work);
+			break;
+		}
+		/* fall through */
+	case MLX5E_AM_START_MEASURE:
+		mlx5e_am_sample(rq, &am->start_sample);
+		am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
+		break;
+	case MLX5E_AM_APPLY_NEW_PROFILE:
+		break;
+	}
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/selftest.c b/drivers/net/ethernet/mellanox/mlx5/en/selftest.c
new file mode 100644
index 0000000..b375682
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/selftest.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <net/udp.h>
+#include "en/en.h"
+
+enum {
+	MLX5E_ST_LINK_STATE,
+	MLX5E_ST_LINK_SPEED,
+	MLX5E_ST_HEALTH_INFO,
+#ifdef CONFIG_INET
+	MLX5E_ST_LOOPBACK,
+#endif
+	MLX5E_ST_NUM,
+};
+
+const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
+	"Link Test",
+	"Speed Test",
+	"Health Test",
+#ifdef CONFIG_INET
+	"Loopback Test",
+#endif
+};
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+	return ARRAY_SIZE(mlx5e_self_tests);
+}
+
+static int mlx5e_test_health_info(struct mlx5e_priv *priv)
+{
+	struct mlx5_core_health *health = &priv->mdev->priv.health;
+
+	return health->sick ? 1 : 0;
+}
+
+static int mlx5e_test_link_state(struct mlx5e_priv *priv)
+{
+	u8 port_state;
+
+	if (!netif_carrier_ok(priv->netdev))
+		return 1;
+
+	port_state = mlx5_query_vport_state(priv->mdev,
+					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+					    0);
+	return port_state == VPORT_STATE_UP ? 0 : 1;
+}
+
+static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	u32 eth_proto_oper;
+	int i;
+
+	if (!netif_carrier_ok(priv->netdev))
+		return 1;
+
+	if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
+		return 1;
+
+	eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+	for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
+		if (eth_proto_oper & MLX5E_PROT_MASK(i))
+			return 0;
+	}
+	return 1;
+}
+
+#ifdef CONFIG_INET
+/* loopback test */
+#define MLX5E_TEST_PKT_SIZE (MLX5_MPWRQ_SMALL_PACKET_THRESHOLD - NET_IP_ALIGN)
+static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST";
+#define MLX5E_TEST_MAGIC 0x5AEED15C001ULL
+
+struct mlx5ehdr {
+	__be32 version;
+	__be64 magic;
+	char   text[ETH_GSTRING_LEN];
+};
+
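+/* Build a self-addressed UDP packet (Discard Protocol, port 9) carrying
+ * the magic header above; the loopback handler below recognizes the
+ * packet by its magic value.
+ */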
+static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
+{
+	struct sk_buff *skb = NULL;
+	struct mlx5ehdr *mlxh;
+	struct ethhdr *ethh;
+	struct udphdr *udph;
+	struct iphdr *iph;
+	int datalen, iplen;
+
+	datalen = MLX5E_TEST_PKT_SIZE -
+		  (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph));
+
+	skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE);
+	if (!skb) {
+		netdev_err(priv->netdev, "\tFailed to alloc loopback skb\n");
+		return NULL;
+	}
+
+	prefetchw(skb->data);
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	/* Reserve space for the Ethernet, IP and UDP headers */
+	ethh = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+	skb_reset_mac_header(skb);
+
+	skb_set_network_header(skb, skb->len);
+	iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr));
+
+	skb_set_transport_header(skb, skb->len);
+	udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr));
+
+	/* Fill ETH header */
+	ether_addr_copy(ethh->h_dest, priv->netdev->dev_addr);
+	eth_zero_addr(ethh->h_source);
+	ethh->h_proto = htons(ETH_P_IP);
+
+	/* Fill UDP header */
+	udph->source = htons(9);
+	udph->dest = htons(9); /* Discard Protocol */
+	udph->len = htons(datalen + sizeof(struct udphdr));
+	udph->check = 0;
+
+	/* Fill IP header */
+	iph->ihl = 5;
+	iph->ttl = 32;
+	iph->version = 4;
+	iph->protocol = IPPROTO_UDP;
+	iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen;
+	iph->tot_len = htons(iplen);
+	iph->frag_off = 0;
+	iph->saddr = 0;
+	iph->daddr = 0;
+	iph->tos = 0;
+	iph->id = 0;
+	ip_send_check(iph);
+
+	/* Fill test header and data */
+	mlxh = (struct mlx5ehdr *)skb_put(skb, sizeof(*mlxh));
+	mlxh->version = 0;
+	mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC);
+	strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text));
+	datalen -= sizeof(*mlxh);
+	memset(skb_put(skb, datalen), 0, datalen);
+
+	skb->csum = 0;
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	udp4_hwcsum(skb, iph->saddr, iph->daddr);
+
+	skb->protocol = htons(ETH_P_IP);
+	skb->pkt_type = PACKET_HOST;
+	skb->dev = priv->netdev;
+
+	return skb;
+}
+
+struct mlx5e_lbt_priv {
+	struct packet_type pt;
+	struct completion comp;
+	bool loopback_ok;
+};
+
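+/* Invoked for every received packet while the test runs (ETH_P_ALL
+ * handler); it frees its copy of every skb and signals completion only
+ * when the magic payload is seen.
+ */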
+static int
+mlx5e_test_loopback_validate(struct sk_buff *skb,
+			     struct net_device *ndev,
+			     struct packet_type *pt,
+			     struct net_device *orig_ndev)
+{
+	struct mlx5e_lbt_priv *lbtp = pt->af_packet_priv;
+	struct mlx5ehdr *mlxh;
+	struct ethhdr *ethh;
+	struct udphdr *udph;
+	struct iphdr *iph;
+
+	/* We are only going to peek, no need to clone the SKB */
+	if (skb->protocol != htons(ETH_P_IP))
+		goto out;
+
+	if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
+		goto out;
+
+	ethh = (struct ethhdr *)skb_mac_header(skb);
+	if (!ether_addr_equal(ethh->h_dest, orig_ndev->dev_addr))
+		goto out;
+
+	iph = ip_hdr(skb);
+	if (iph->protocol != IPPROTO_UDP)
+		goto out;
+
+	udph = udp_hdr(skb);
+	if (udph->dest != htons(9))
+		goto out;
+
+	mlxh = (struct mlx5ehdr *)((char *)udph + sizeof(*udph));
+	if (mlxh->magic != cpu_to_be64(MLX5E_TEST_MAGIC))
+		goto out; /* so close ! */
+
+	/* bingo */
+	lbtp->loopback_ok = true;
+	complete(&lbtp->comp);
+out:
+	kfree_skb(skb);
+	return 0;
+}
+
+static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
+				     struct mlx5e_lbt_priv *lbtp)
+{
+	int err = 0;
+
+	err = mlx5e_refresh_tirs_self_loopback(priv->mdev, true);
+	if (err) {
+		netdev_err(priv->netdev,
+			   "\tFailed to enable UC loopback err(%d)\n", err);
+		return err;
+	}
+
+	lbtp->loopback_ok = false;
+	init_completion(&lbtp->comp);
+
+	lbtp->pt.type = htons(ETH_P_ALL);
+	lbtp->pt.func = mlx5e_test_loopback_validate;
+	lbtp->pt.dev = priv->netdev;
+	lbtp->pt.af_packet_priv = lbtp;
+	dev_add_pack(&lbtp->pt);
+	return err;
+}
+
+static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
+					struct mlx5e_lbt_priv *lbtp)
+{
+	dev_remove_pack(&lbtp->pt);
+	mlx5e_refresh_tirs_self_loopback(priv->mdev, false);
+}
+
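+/* Test sequence: enable HW UC self-loopback, register the handler above,
+ * transmit one crafted UDP packet and wait up to 200ms for it to come
+ * back with the magic intact.
+ */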
+#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
+static int mlx5e_test_loopback(struct mlx5e_priv *priv)
+{
+	struct mlx5e_lbt_priv *lbtp;
+	struct sk_buff *skb = NULL;
+	int err;
+
+	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+		netdev_err(priv->netdev,
+			   "\tCan't perform loopback test while device is down\n");
+		return -ENODEV;
+	}
+
+	lbtp = kzalloc(sizeof(*lbtp), GFP_KERNEL);
+	if (!lbtp)
+		return -ENOMEM;
+	lbtp->loopback_ok = false;
+
+	err = mlx5e_test_loopback_setup(priv, lbtp);
+	if (err)
+		goto out;
+
+	skb = mlx5e_test_get_udp_skb(priv);
+	if (!skb) {
+		err = -ENOMEM;
+		goto cleanup;
+	}
+
+	skb_set_queue_mapping(skb, 0);
+	err = dev_queue_xmit(skb);
+	if (err) {
+		netdev_err(priv->netdev,
+			   "\tFailed to xmit loopback packet err(%d)\n",
+			   err);
+		goto cleanup;
+	}
+
+	wait_for_completion_timeout(&lbtp->comp, MLX5E_LB_VERIFY_TIMEOUT);
+	err = !lbtp->loopback_ok;
+
+cleanup:
+	mlx5e_test_loopback_cleanup(priv, lbtp);
+out:
+	kfree(lbtp);
+	return err;
+}
+#endif
+
+static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
+	mlx5e_test_link_state,
+	mlx5e_test_link_speed,
+	mlx5e_test_health_info,
+#ifdef CONFIG_INET
+	mlx5e_test_loopback,
+#endif
+};
+
+void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
+		     u64 *buf)
+{
+	struct mlx5e_priv *priv = netdev_priv(ndev);
+	int i;
+
+	memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+
+	mutex_lock(&priv->state_lock);
+	netdev_info(ndev, "Self test begin..\n");
+
+	for (i = 0; i < MLX5E_ST_NUM; i++) {
+		netdev_info(ndev, "\t[%d] %s start..\n",
+			    i, mlx5e_self_tests[i]);
+		buf[i] = mlx5e_st_func[i](priv);
+		netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
+			    i, mlx5e_self_tests[i], buf[i]);
+	}
+
+	mutex_unlock(&priv->state_lock);
+
+	for (i = 0; i < MLX5E_ST_NUM; i++) {
+		if (buf[i]) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			break;
+		}
+	}
+	netdev_info(ndev, "Self test out: status flags(0x%x)\n",
+		    etest->flags);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/stats.h b/drivers/net/ethernet/mellanox/mlx5/en/stats.h
new file mode 100644
index 0000000..f202f87
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/stats.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+	(*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+	(*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+
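+/* The RX/TX variants embed a "%d" in the format string, which is filled
+ * in with the ring index when the ethtool strings are generated.
+ */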
+struct counter_desc {
+	char		format[ETH_GSTRING_LEN];
+	int		offset; /* Byte offset */
+};
+
+struct mlx5e_sw_stats {
+	u64 rx_packets;
+	u64 rx_bytes;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 tx_tso_packets;
+	u64 tx_tso_bytes;
+	u64 tx_tso_inner_packets;
+	u64 tx_tso_inner_bytes;
+	u64 rx_lro_packets;
+	u64 rx_lro_bytes;
+	u64 rx_csum_unnecessary;
+	u64 rx_csum_none;
+	u64 rx_csum_complete;
+	u64 rx_csum_unnecessary_inner;
+	u64 rx_xdp_drop;
+	u64 rx_xdp_tx;
+	u64 rx_xdp_tx_full;
+	u64 tx_csum_partial;
+	u64 tx_csum_partial_inner;
+	u64 tx_queue_stopped;
+	u64 tx_queue_wake;
+	u64 tx_queue_dropped;
+	u64 tx_xmit_more;
+	u64 rx_wqe_err;
+	u64 rx_mpwqe_filler;
+	u64 rx_buff_alloc_err;
+	u64 rx_cqe_compress_blks;
+	u64 rx_cqe_compress_pkts;
+	u64 rx_cache_reuse;
+	u64 rx_cache_full;
+	u64 rx_cache_empty;
+	u64 rx_cache_busy;
+
+	/* Special handling counters */
+	u64 link_down_events_phy;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+struct mlx5e_qcounter_stats {
+	u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+						vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+	{ "rx_vport_unicast_packets",
+		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+	{ "rx_vport_unicast_bytes",
+		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+	{ "tx_vport_unicast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+	{ "tx_vport_unicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+	{ "rx_vport_multicast_packets",
+		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+	{ "rx_vport_multicast_bytes",
+		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+	{ "tx_vport_multicast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+	{ "tx_vport_multicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+	{ "rx_vport_broadcast_packets",
+		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+	{ "rx_vport_broadcast_bytes",
+		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+	{ "tx_vport_broadcast_packets",
+		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+	{ "tx_vport_broadcast_bytes",
+		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+	{ "rx_vport_rdma_unicast_packets",
+		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+	{ "rx_vport_rdma_unicast_bytes",
+		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+	{ "tx_vport_rdma_unicast_packets",
+		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+	{ "tx_vport_rdma_unicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+	{ "rx_vport_rdma_multicast_packets",
+		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+	{ "rx_vport_rdma_multicast_bytes",
+		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+	{ "tx_vport_rdma_multicast_packets",
+		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+	{ "tx_vport_rdma_multicast_bytes",
+		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+	MLX5_BYTE_OFF(ppcnt_reg, \
+		      counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+		   counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO				8
+
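+/* One PPCNT register snapshot is kept per counter group.  Each 64-bit
+ * counter is laid out as a high/low dword pair, so the c##_high accessors
+ * take the offset of the high dword and read 64 bits from there.
+ */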
+struct mlx5e_pport_stats {
+	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+	/* %s is "global" or "prio{i}" */
+	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+#define PCIE_PERF_OFF(c) \
+	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_GET(pcie_stats, c) \
+	MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
+		 counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_TAS_OFF(c) \
+	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
+#define PCIE_TAS_GET(pcie_stats, c) \
+	MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
+		 counter_set.pcie_tas_cntrs_grp_data_layout.c)
+
+struct mlx5e_pcie_stats {
+	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+	__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+};
+
+static const struct counter_desc pcie_perf_stats_desc[] = {
+	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+static const struct counter_desc pcie_tas_stats_desc[] = {
+	{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
+	{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
+};
+
+struct mlx5e_rq_stats {
+	u64 packets;
+	u64 bytes;
+	u64 csum_complete;
+	u64 csum_unnecessary_inner;
+	u64 csum_none;
+	u64 lro_packets;
+	u64 lro_bytes;
+	u64 xdp_drop;
+	u64 xdp_tx;
+	u64 xdp_tx_full;
+	u64 wqe_err;
+	u64 mpwqe_filler;
+	u64 buff_alloc_err;
+	u64 cqe_compress_blks;
+	u64 cqe_compress_pkts;
+	u64 cache_reuse;
+	u64 cache_full;
+	u64 cache_empty;
+	u64 cache_busy;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+};
+
+struct mlx5e_sq_stats {
+	/* commonly accessed in data path */
+	u64 packets;
+	u64 bytes;
+	u64 xmit_more;
+	u64 tso_packets;
+	u64 tso_bytes;
+	u64 tso_inner_packets;
+	u64 tso_inner_bytes;
+	u64 csum_partial_inner;
+	u64 nop;
+	/* less likely accessed in data path */
+	u64 csum_none;
+	u64 stopped;
+	u64 wake;
+	u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_TAS_COUNTERS		ARRAY_SIZE(pcie_tas_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS		(NUM_PPORT_802_3_COUNTERS + \
+					 NUM_PPORT_2863_COUNTERS  + \
+					 NUM_PPORT_2819_COUNTERS  + \
+					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+					 NUM_PPORT_PRIO)
+#define NUM_PCIE_COUNTERS		(NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
+#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)
+
+struct mlx5e_stats {
+	struct mlx5e_sw_stats sw;
+	struct mlx5e_qcounter_stats qcnt;
+	struct mlx5e_vport_stats vport;
+	struct mlx5e_pport_stats pport;
+	struct mlx5e_pcie_stats pcie;
+	struct rtnl_link_stats64 vf_vport;
+};
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+	{ "module_plug", 0 },
+	{ "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+	{ "module_pwr_budget_exd", 0 },  /* power budget exceeded */
+	{ "module_long_range", 8 },      /* long range for non-Mellanox cable */
+	{ "module_bus_stuck", 16 },      /* bus stuck (I2C or data shorted) */
+	{ "module_no_eeprom", 24 },      /* no EEPROM / retry timeout */
+	{ "module_enforce_part", 32 },   /* enforce part number list */
+	{ "module_unknown_id", 40 },     /* unknown identifier */
+	{ "module_high_temp", 48 },      /* high temperature */
+	{ "module_bad_shorted", 56 },    /* bad or shorted cable/module */
+	{ "module_unknown_status", 64 },
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/tc.c b/drivers/net/ethernet/mellanox/mlx5/en/tc.c
new file mode 100644
index 0000000..2506736
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/tc.c
@@ -0,0 +1,1094 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <net/flow_dissector.h>
+#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_skbedit.h>
+#include "lib/fs.h"
+#include "lib/device.h"
+#include <linux/rhashtable.h>
+#include <net/switchdev.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_vlan.h>
+#include <net/tc_act/tc_tunnel_key.h>
+#include <net/vxlan.h>
+#include "en/en.h"
+#include "en/tc.h"
+#include "sriov/eswitch.h"
+#include "en/vxlan.h"
+
+struct mlx5e_tc_flow {
+	struct rhash_head	node;
+	u64			cookie;
+	struct mlx5_flow_handle *rule;
+	struct list_head	encap; /* flows sharing the same encap */
+	struct mlx5_esw_flow_attr *attr;
+};
+
+enum {
+	MLX5_HEADER_TYPE_VXLAN = 0x0,
+	MLX5_HEADER_TYPE_NVGRE = 0x1,
+};
+
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
+
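+/* NIC (non-eswitch) offload: the TC flow table is created lazily on the
+ * first rule and destroyed again when the last rule is removed (see
+ * mlx5e_tc_del_flow below).
+ */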
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      u32 action, u32 flow_tag)
+{
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_flow_destination dest = { 0 };
+	struct mlx5_flow_act flow_act = {
+		.action = action,
+		.flow_tag = flow_tag,
+		.encap_id = 0,
+	};
+	struct mlx5_fc *counter = NULL;
+	struct mlx5_flow_handle *rule;
+	bool table_created = false;
+
+	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+		dest.ft = priv->fs.vlan.ft.t;
+	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		counter = mlx5_fc_create(dev, true);
+		if (IS_ERR(counter))
+			return ERR_CAST(counter);
+
+		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest.counter = counter;
+	}
+
+	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+		priv->fs.tc.t =
+			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+							    MLX5E_TC_PRIO,
+							    MLX5E_TC_TABLE_NUM_ENTRIES,
+							    MLX5E_TC_TABLE_NUM_GROUPS,
+							    0, 0);
+		if (IS_ERR(priv->fs.tc.t)) {
+			netdev_err(priv->netdev,
+				   "Failed to create tc offload table\n");
+			rule = ERR_CAST(priv->fs.tc.t);
+			goto err_create_ft;
+		}
+
+		table_created = true;
+	}
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
+
+	if (IS_ERR(rule))
+		goto err_add_rule;
+
+	return rule;
+
+err_add_rule:
+	if (table_created) {
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
+	}
+err_create_ft:
+	mlx5_fc_destroy(dev, counter);
+
+	return rule;
+}
+
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	int err;
+
+	err = mlx5_eswitch_add_vlan_action(esw, attr);
+	if (err)
+		return ERR_PTR(err);
+
+	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+}
+
+static void mlx5e_detach_encap(struct mlx5e_priv *priv,
+			       struct mlx5e_tc_flow *flow)
+{
+	struct list_head *next = flow->encap.next;
+
+	list_del(&flow->encap);
+	if (list_empty(next)) {
+		struct mlx5_encap_entry *e;
+
+		e = list_entry(next, struct mlx5_encap_entry, flows);
+		if (e->n) {
+			mlx5_encap_dealloc(priv->mdev, e->encap_id);
+			neigh_release(e->n);
+		}
+		hlist_del_rcu(&e->encap_hlist);
+		kfree(e);
+	}
+}
+
+static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
+			      struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5_fc *counter = NULL;
+
+	counter = mlx5_flow_rule_counter(flow->rule);
+
+	mlx5_del_flow_rules(flow->rule);
+
+	if (esw && esw->mode == SRIOV_OFFLOADS) {
+		mlx5_eswitch_del_vlan_action(esw, flow->attr);
+		if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+			mlx5e_detach_encap(priv, flow);
+	}
+
+	mlx5_fc_destroy(priv->mdev, counter);
+
+	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+		mlx5_destroy_flow_table(priv->fs.tc.t);
+		priv->fs.tc.t = NULL;
+	}
+}
+
+static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
+			     struct tc_cls_flower_offload *f)
+{
+	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				       outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				       outer_headers);
+	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				    misc_parameters);
+	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				    misc_parameters);
+
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+		struct flow_dissector_key_keyid *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_KEYID,
+						  f->key);
+		struct flow_dissector_key_keyid *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_KEYID,
+						  f->mask);
+		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
+			 be32_to_cpu(mask->keyid));
+		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
+			 be32_to_cpu(key->keyid));
+	}
+}
+
+static int parse_tunnel_attr(struct mlx5e_priv *priv,
+			     struct mlx5_flow_spec *spec,
+			     struct tc_cls_flower_offload *f)
+{
+	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				       outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				       outer_headers);
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+		struct flow_dissector_key_ports *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_PORTS,
+						  f->key);
+		struct flow_dissector_key_ports *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_PORTS,
+						  f->mask);
+
+		/* Full udp dst port must be given */
+		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
+			return -EOPNOTSUPP;
+
+		/* udp src port isn't supported */
+		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
+			return -EOPNOTSUPP;
+
+		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
+		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
+			parse_vxlan_attr(spec, f);
+		else
+			return -EOPNOTSUPP;
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+			 udp_dport, ntohs(mask->dst));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+			 udp_dport, ntohs(key->dst));
+
+	} else { /* udp dst port must be given */
+		return -EOPNOTSUPP;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+		struct flow_dissector_key_ipv4_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv4_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
+						  f->mask);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
+			 ntohl(mask->src));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
+			 ntohl(key->src));
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+			 ntohl(mask->dst));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
+			 ntohl(key->dst));
+	}
+
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+
+	/* Enforce DMAC when offloading incoming tunneled flows.
+	 * Flow counters require a match on the DMAC.
+	 */
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
+	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
+	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+				     dmac_47_16), priv->netdev->dev_addr);
+
+	/* let software handle IP fragments */
+	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+
+	return 0;
+}
+
+static int __parse_cls_flower(struct mlx5e_priv *priv,
+			      struct mlx5_flow_spec *spec,
+			      struct tc_cls_flower_offload *f,
+			      u8 *min_inline)
+{
+	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+				       outer_headers);
+	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+				       outer_headers);
+	u16 addr_type = 0;
+	u8 ip_proto = 0;
+
+	*min_inline = MLX5_INLINE_MODE_L2;
+
+	if (f->dissector->used_keys &
+	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
+	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)	|
+	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
+		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
+			    f->dissector->used_keys);
+		return -EOPNOTSUPP;
+	}
+
+	if ((dissector_uses_key(f->dissector,
+				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
+	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
+	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
+	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
+		struct flow_dissector_key_control *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
+						  f->key);
+		switch (key->addr_type) {
+		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+			if (parse_tunnel_attr(priv, spec, f))
+				return -EOPNOTSUPP;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
+		/* In decap flows, the header pointers should point to the
+		 * inner headers; the outer headers were already set by
+		 * parse_tunnel_attr.
+		 */
+		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+					 inner_headers);
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+		struct flow_dissector_key_control *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->key);
+
+		struct flow_dissector_key_control *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_CONTROL,
+						  f->mask);
+		addr_type = key->addr_type;
+
+		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
+				 key->flags & FLOW_DIS_IS_FRAGMENT);
+		}
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+		struct flow_dissector_key_basic *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->key);
+		struct flow_dissector_key_basic *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_BASIC,
+						  f->mask);
+		ip_proto = key->ip_proto;
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+			 ntohs(mask->n_proto));
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+			 ntohs(key->n_proto));
+
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+			 mask->ip_proto);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+			 key->ip_proto);
+
+		if (mask->ip_proto)
+			*min_inline = MLX5_INLINE_MODE_IP;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+		struct flow_dissector_key_eth_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  f->key);
+		struct flow_dissector_key_eth_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
+						  f->mask);
+
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+					     dmac_47_16),
+				mask->dst);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+					     dmac_47_16),
+				key->dst);
+
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+					     smac_47_16),
+				mask->src);
+		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+					     smac_47_16),
+				key->src);
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_dissector_key_vlan *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->key);
+		struct flow_dissector_key_vlan *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_VLAN,
+						  f->mask);
+		if (mask->vlan_id || mask->vlan_priority) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
+		}
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+		struct flow_dissector_key_ipv4_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv4_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+						  f->mask);
+
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       &mask->src, sizeof(mask->src));
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
+		       &key->src, sizeof(key->src));
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       &mask->dst, sizeof(mask->dst));
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+		       &key->dst, sizeof(key->dst));
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_IP;
+	}
+
+	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+		struct flow_dissector_key_ipv6_addrs *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->key);
+		struct flow_dissector_key_ipv6_addrs *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+						  f->mask);
+
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       &mask->src, sizeof(mask->src));
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
+		       &key->src, sizeof(key->src));
+
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       &mask->dst, sizeof(mask->dst));
+		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+		       &key->dst, sizeof(key->dst));
+
+		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
+		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
+			*min_inline = MLX5_INLINE_MODE_IP;
+	}
+
+	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+		struct flow_dissector_key_ports *key =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_PORTS,
+						  f->key);
+		struct flow_dissector_key_ports *mask =
+			skb_flow_dissector_target(f->dissector,
+						  FLOW_DISSECTOR_KEY_PORTS,
+						  f->mask);
+		switch (ip_proto) {
+		case IPPROTO_TCP:
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+				 tcp_sport, ntohs(mask->src));
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 tcp_sport, ntohs(key->src));
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+				 tcp_dport, ntohs(mask->dst));
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 tcp_dport, ntohs(key->dst));
+			break;
+
+		case IPPROTO_UDP:
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+				 udp_sport, ntohs(mask->src));
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 udp_sport, ntohs(key->src));
+
+			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+				 udp_dport, ntohs(mask->dst));
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 udp_dport, ntohs(key->dst));
+			break;
+		default:
+			netdev_err(priv->netdev,
+				   "Only UDP and TCP transport are supported\n");
+			return -EINVAL;
+		}
+
+		if (mask->src || mask->dst)
+			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
+	}
+
+	return 0;
+}
+
+static int parse_cls_flower(struct mlx5e_priv *priv,
+			    struct mlx5_flow_spec *spec,
+			    struct tc_cls_flower_offload *f)
+{
+	struct mlx5_core_dev *dev = priv->mdev;
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	struct mlx5_eswitch_rep *rep = priv->ppriv;
+	u8 min_inline;
+	int err;
+
+	err = __parse_cls_flower(priv, spec, f, &min_inline);
+
+	if (!err && esw->mode == SRIOV_OFFLOADS &&
+	    rep->vport != FDB_UPLINK_VPORT) {
+		if (min_inline > esw->offloads.inline_mode) {
+			netdev_warn(priv->netdev,
+				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
+				    min_inline, esw->offloads.inline_mode);
+			return -EOPNOTSUPP;
+		}
+	}
+
+	return err;
+}
+
+static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+				u32 *action, u32 *flow_tag)
+{
+	const struct tc_action *a;
+	LIST_HEAD(actions);
+
+	if (tc_no_actions(exts))
+		return -EINVAL;
+
+	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+	*action = 0;
+
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
+		/* Only support a single action per rule */
+		if (*action)
+			return -EINVAL;
+
+		if (is_tcf_gact_shot(a)) {
+			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+			if (MLX5_CAP_FLOWTABLE(priv->mdev,
+					       flow_table_properties_nic_receive.flow_counter))
+				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			continue;
+		}
+
+		if (is_tcf_skbedit_mark(a)) {
+			u32 mark = tcf_skbedit_mark(a);
+
+			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
+				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
+					    mark);
+				return -EINVAL;
+			}
+
+			*flow_tag = mark;
+			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+			continue;
+		}
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline int cmp_encap_info(struct mlx5_encap_info *a,
+				 struct mlx5_encap_info *b)
+{
+	return memcmp(a, b, sizeof(*a));
+}
+
+static inline int hash_encap_info(struct mlx5_encap_info *info)
+{
+	return jhash(info, sizeof(*info), 0);
+}
+
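+/* Resolve the route for the tunnel destination and return the neighbour,
+ * source address, TTL and egress netdev needed to build the encap header.
+ */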
+static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
+				   struct net_device *mirred_dev,
+				   struct net_device **out_dev,
+				   struct flowi4 *fl4,
+				   struct neighbour **out_n,
+				   __be32 *saddr,
+				   int *out_ttl)
+{
+	struct rtable *rt;
+	struct neighbour *n = NULL;
+	int ttl;
+
+#if IS_ENABLED(CONFIG_INET)
+	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
+	if (IS_ERR(rt)) {
+		pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
+		return -EOPNOTSUPP;
+	}
+#else
+	return -EOPNOTSUPP;
+#endif
+
+	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
+		pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
+			__func__);
+		ip_rt_put(rt);
+		return -EOPNOTSUPP;
+	}
+
+	ttl = ip4_dst_hoplimit(&rt->dst);
+	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
+	ip_rt_put(rt);
+	if (!n)
+		return -ENOMEM;
+
+	*out_n = n;
+	*saddr = fl4->saddr;
+	*out_ttl = ttl;
+	*out_dev = rt->dst.dev;
+
+	return 0;
+}
+
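+/* Write an Ethernet/IPv4/UDP/VXLAN header into the caller's buffer and
+ * return its size; the per-packet length and checksum fields are expected
+ * to be filled by the hardware.
+ */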
+static int gen_vxlan_header_ipv4(struct net_device *out_dev,
+				 char buf[],
+				 unsigned char h_dest[ETH_ALEN],
+				 int ttl,
+				 __be32 daddr,
+				 __be32 saddr,
+				 __be16 udp_dst_port,
+				 __be32 vx_vni)
+{
+	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
+	struct ethhdr *eth = (struct ethhdr *)buf;
+	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
+	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
+	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+	memset(buf, 0, encap_size);
+
+	ether_addr_copy(eth->h_dest, h_dest);
+	ether_addr_copy(eth->h_source, out_dev->dev_addr);
+	eth->h_proto = htons(ETH_P_IP);
+
+	ip->daddr = daddr;
+	ip->saddr = saddr;
+
+	ip->ttl = ttl;
+	ip->protocol = IPPROTO_UDP;
+	ip->version = 0x4;
+	ip->ihl = 0x5;
+
+	udp->dest = udp_dst_port;
+	vxh->vx_flags = VXLAN_HF_VNI;
+	vxh->vx_vni = vxlan_vni_field(vx_vni);
+
+	return encap_size;
+}
+
+static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
+					  struct net_device *mirred_dev,
+					  struct mlx5_encap_entry *e,
+					  struct net_device **out_dev)
+{
+	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+	struct flowi4 fl4 = {};
+	struct neighbour *n;
+	char *encap_header;
+	int encap_size;
+	__be32 saddr;
+	int ttl;
+	int err;
+
+	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+	if (!encap_header)
+		return -ENOMEM;
+
+	switch (e->tunnel_type) {
+	case MLX5_HEADER_TYPE_VXLAN:
+		fl4.flowi4_proto = IPPROTO_UDP;
+		fl4.fl4_dport = e->tun_info.tp_dst;
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+	fl4.daddr = e->tun_info.daddr;
+
+	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
+				      &fl4, &n, &saddr, &ttl);
+	if (err)
+		goto out;
+
+	e->n = n;
+	e->out_dev = *out_dev;
+
+	if (!(n->nud_state & NUD_VALID)) {
+		err = -ENOTSUPP;
+		goto out;
+	}
+
+	neigh_ha_snapshot(e->h_dest, n, *out_dev);
+
+	switch (e->tunnel_type) {
+	case MLX5_HEADER_TYPE_VXLAN:
+		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+						   e->h_dest, ttl,
+						   e->tun_info.daddr,
+						   saddr, e->tun_info.tp_dst,
+						   e->tun_info.tun_id);
+		break;
+	default:
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+			       encap_size, encap_header, &e->encap_id);
+out:
+	kfree(encap_header);
+	return err;
+}
+
+static int mlx5e_attach_encap(struct mlx5e_priv *priv,
+			      struct ip_tunnel_info *tun_info,
+			      struct net_device *mirred_dev,
+			      struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	unsigned short family = ip_tunnel_info_af(tun_info);
+	struct ip_tunnel_key *key = &tun_info->key;
+	struct mlx5_encap_info info;
+	struct mlx5_encap_entry *e;
+	struct net_device *out_dev;
+	uintptr_t hash_key;
+	bool found = false;
+	int tunnel_type;
+	int err;
+
+	/* udp dst port must be given */
+	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+		return -EOPNOTSUPP;
+
+	if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
+	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
+		info.tp_dst = key->tp_dst;
+		info.tun_id = tunnel_id_to_key32(key->tun_id);
+		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+	} else {
+		return -EOPNOTSUPP;
+	}
+
+	switch (family) {
+	case AF_INET:
+		info.daddr = key->u.ipv4.dst;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	hash_key = hash_encap_info(&info);
+
+	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
+				   encap_hlist, hash_key) {
+		if (!cmp_encap_info(&e->tun_info, &info)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		attr->encap = e;
+		return 0;
+	}
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->tun_info = info;
+	e->tunnel_type = tunnel_type;
+	INIT_LIST_HEAD(&e->flows);
+
+	err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+	if (err)
+		goto out_err;
+
+	attr->encap = e;
+	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
+
+	return err;
+
+out_err:
+	kfree(e);
+	return err;
+}
+
+static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+				struct mlx5e_tc_flow *flow)
+{
+	struct mlx5_esw_flow_attr *attr = flow->attr;
+	struct ip_tunnel_info *info = NULL;
+	const struct tc_action *a;
+	LIST_HEAD(actions);
+	bool encap = false;
+	int err;
+
+	if (tc_no_actions(exts))
+		return -EINVAL;
+
+	memset(attr, 0, sizeof(*attr));
+	attr->in_rep = priv->ppriv;
+
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
+		if (is_tcf_gact_shot(a)) {
+			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+					MLX5_FLOW_CONTEXT_ACTION_COUNT;
+			continue;
+		}
+
+		if (is_tcf_mirred_egress_redirect(a)) {
+			int ifindex = tcf_mirred_ifindex(a);
+			struct net_device *out_dev;
+			struct mlx5e_priv *out_priv;
+
+			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+
+			if (switchdev_port_same_parent_id(priv->netdev,
+							  out_dev)) {
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				out_priv = netdev_priv(out_dev);
+				attr->out_rep = out_priv->ppriv;
+			} else if (encap) {
+				err = mlx5e_attach_encap(priv, info,
+							 out_dev, attr);
+				if (err)
+					return err;
+				list_add(&flow->encap, &attr->encap->flows);
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+					MLX5_FLOW_CONTEXT_ACTION_COUNT;
+				out_priv = netdev_priv(attr->encap->out_dev);
+				attr->out_rep = out_priv->ppriv;
+			} else {
+				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
+				       priv->netdev->name, out_dev->name);
+				return -EINVAL;
+			}
+			continue;
+		}
+
+		if (is_tcf_tunnel_set(a)) {
+			info = tcf_tunnel_info(a);
+			if (info)
+				encap = true;
+			else
+				return -EOPNOTSUPP;
+			continue;
+		}
+
+		if (is_tcf_vlan(a)) {
+			if (tcf_vlan_action(a) == VLAN_F_POP) {
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
+				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+					return -EOPNOTSUPP;
+
+				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+				attr->vlan = tcf_vlan_push_vid(a);
+			}
+			continue;
+		}
+
+		if (is_tcf_tunnel_release(a)) {
+			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+			continue;
+		}
+
+		return -EINVAL;
+	}
+	return 0;
+}
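+
+/* Illustration (hypothetical rule, not part of this patch): a filter such as
+ *
+ *	tc filter add dev <rep> parent ffff: protocol ip flower ... \
+ *		action tunnel_key set src_ip ... dst_ip ... id 42 dst_port 4789 \
+ *		action mirred egress redirect dev <vxlan-dev>
+ *
+ * walks the loop above twice: tunnel_key records the ip_tunnel_info and sets
+ * 'encap', then the mirred action attaches an encap entry and ends with
+ * attr->action = ENCAP | FWD_DEST | COUNT.
+ */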
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+			   struct tc_cls_flower_offload *f)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	int err = 0;
+	bool fdb_flow = false;
+	u32 flow_tag, action;
+	struct mlx5e_tc_flow *flow;
+	struct mlx5_flow_spec *spec;
+	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+	if (esw && esw->mode == SRIOV_OFFLOADS)
+		fdb_flow = true;
+
+	if (fdb_flow)
+		flow = kzalloc(sizeof(*flow) +
+			       sizeof(struct mlx5_esw_flow_attr),
+			       GFP_KERNEL);
+	else
+		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec || !flow) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	flow->cookie = f->cookie;
+
+	err = parse_cls_flower(priv, spec, f);
+	if (err < 0)
+		goto err_free;
+
+	if (fdb_flow) {
+		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
+		err = parse_tc_fdb_actions(priv, f->exts, flow);
+		if (err < 0)
+			goto err_free;
+		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
+	} else {
+		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+		if (err < 0)
+			goto err_free;
+		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+	}
+
+	if (IS_ERR(flow->rule)) {
+		err = PTR_ERR(flow->rule);
+		goto err_free;
+	}
+
+	err = rhashtable_insert_fast(&tc->ht, &flow->node,
+				     tc->ht_params);
+	if (err)
+		goto err_del_rule;
+
+	goto out;
+
+err_del_rule:
+	mlx5_del_flow_rules(flow->rule);
+
+err_free:
+	kfree(flow);
+out:
+	kvfree(spec);
+	return err;
+}
+
+int mlx5e_delete_flower(struct mlx5e_priv *priv,
+			struct tc_cls_flower_offload *f)
+{
+	struct mlx5e_tc_flow *flow;
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+
+	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+				      tc->ht_params);
+	if (!flow)
+		return -EINVAL;
+
+	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+
+	mlx5e_tc_del_flow(priv, flow);
+
+	kfree(flow);
+
+	return 0;
+}
+
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+		       struct tc_cls_flower_offload *f)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	struct mlx5e_tc_flow *flow;
+	struct tc_action *a;
+	struct mlx5_fc *counter;
+	LIST_HEAD(actions);
+	u64 bytes;
+	u64 packets;
+	u64 lastuse;
+
+	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+				      tc->ht_params);
+	if (!flow)
+		return -EINVAL;
+
+	counter = mlx5_flow_rule_counter(flow->rule);
+	if (!counter)
+		return 0;
+
+	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+	tcf_exts_to_list(f->exts, &actions);
+	list_for_each_entry(a, &actions, list)
+		tcf_action_stats_update(a, bytes, packets, lastuse);
+
+	return 0;
+}
+
+static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
+	.head_offset = offsetof(struct mlx5e_tc_flow, node),
+	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
+	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
+	.automatic_shrinking = true,
+};
+
+int mlx5e_tc_init(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+
+	tc->ht_params = mlx5e_tc_flow_ht_params;
+	return rhashtable_init(&tc->ht, &tc->ht_params);
+}
+
+static void _mlx5e_tc_del_flow(void *ptr, void *arg)
+{
+	struct mlx5e_tc_flow *flow = ptr;
+	struct mlx5e_priv *priv = arg;
+
+	mlx5e_tc_del_flow(priv, flow);
+	kfree(flow);
+}
+
+void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5e_tc_table *tc = &priv->fs.tc;
+
+	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
+
+	if (!IS_ERR_OR_NULL(tc->t)) {
+		mlx5_destroy_flow_table(tc->t);
+		tc->t = NULL;
+	}
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/tc.h b/drivers/net/ethernet/mellanox/mlx5/en/tc.h
new file mode 100644
index 0000000..34bf903
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/tc.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_EN_TC_H__
+#define __MLX5_EN_TC_H__
+
+#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
+
+int mlx5e_tc_init(struct mlx5e_priv *priv);
+void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
+			   struct tc_cls_flower_offload *f);
+int mlx5e_delete_flower(struct mlx5e_priv *priv,
+			struct tc_cls_flower_offload *f);
+
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+		       struct tc_cls_flower_offload *f);
+
+static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
+{
+	return atomic_read(&priv->fs.tc.ht.nelems);
+}
+
+#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/tx.c b/drivers/net/ethernet/mellanox/mlx5/en/tx.c
new file mode 100644
index 0000000..1aa99cd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/tx.c
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en/en.h"
+
+#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
+#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
+			    MLX5E_SQ_NOPS_ROOM)
+
+void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
+{
+	struct mlx5_wq_cyc                *wq  = &sq->wq;
+
+	u16 pi = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe              *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+
+	struct mlx5_wqe_ctrl_seg         *cseg = &wqe->ctrl;
+
+	memset(cseg, 0, sizeof(*cseg));
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);
+
+	sq->pc++;
+	sq->stats.nop++;
+
+	if (notify_hw) {
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+	}
+}
+
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+				      struct mlx5e_sq_dma *dma)
+{
+	switch (dma->type) {
+	case MLX5E_DMA_MAP_SINGLE:
+		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	case MLX5E_DMA_MAP_PAGE:
+		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	default:
+		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
+	}
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+				  dma_addr_t addr,
+				  u32 size,
+				  enum mlx5e_dma_map_type map_type)
+{
+	u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask;
+
+	sq->db.txq.dma_fifo[i].addr = addr;
+	sq->db.txq.dma_fifo[i].size = size;
+	sq->db.txq.dma_fifo[i].type = map_type;
+	sq->dma_fifo_pc++;
+}
+
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+{
+	return &sq->db.txq.dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
+{
+	int i;
+
+	for (i = 0; i < num_dma; i++) {
+		struct mlx5e_sq_dma *last_pushed_dma =
+			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+	}
+}
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+		       void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	int channel_ix = fallback(dev, skb);
+	int up = 0;
+
+	if (!netdev_get_num_tc(dev))
+		return channel_ix;
+
+	if (skb_vlan_tag_present(skb))
+		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+	/* channel_ix can be larger than num_channels since
+	 * dev->real_num_tx_queues = num_channels * num_tc
+	 */
+	if (channel_ix >= priv->params.num_channels)
+		channel_ix = reciprocal_scale(channel_ix,
+					      priv->params.num_channels);
+
+	return priv->channeltc_to_txq_map[channel_ix][up];
+}
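+
+/* Worked example (illustrative values): with num_channels = 4 and num_tc = 2
+ * the fallback may return channel_ix in [0, 8).  reciprocal_scale(ix, 4)
+ * computes (u64)ix * 4 >> 32, mapping the whole u32 range onto [0, 4), so a
+ * small out-of-range ix such as 6 scales to 0.  With VLAN PCP 1 the chosen
+ * queue is channeltc_to_txq_map[0][1].
+ */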
+
+static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+
+	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+}
+
+static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+{
+	struct flow_keys keys;
+
+	if (skb_transport_header_was_set(skb))
+		return skb_transport_offset(skb);
+	else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+		return keys.control.thoff;
+	else
+		return mlx5e_skb_l2_header_offset(skb);
+}
+
+static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+						 struct sk_buff *skb)
+{
+	int hlen;
+
+	switch (mode) {
+	case MLX5_INLINE_MODE_TCP_UDP:
+		hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+		if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
+			hlen += VLAN_HLEN;
+		return hlen;
+	case MLX5_INLINE_MODE_IP:
+		/* When the transport header is set to zero, it means there is
+		 * no transport header.  When it is set to all 0xff, it means
+		 * the transport header was never set.
+		 */
+		if (skb_transport_offset(skb))
+			return mlx5e_skb_l3_header_offset(skb);
+		/* fall through */
+	case MLX5_INLINE_MODE_L2:
+	default:
+		return mlx5e_skb_l2_header_offset(skb);
+	}
+}
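+
+/* Illustration (assumed untagged IPv4/TCP frame without options):
+ * MLX5_INLINE_MODE_TCP_UDP inlines through the end of the TCP header
+ * (14 + 20 + 20 = 54 bytes), MLX5_INLINE_MODE_IP stops at the transport
+ * offset (34 bytes), and MLX5_INLINE_MODE_L2 inlines
+ * max(network offset, ETH_HLEN + VLAN_HLEN) = 18 bytes.
+ */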
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+					    struct sk_buff *skb, bool bf)
+{
+	/* Some NIC TX decisions, e.g. loopback, are based on the packet
+	 * headers and occur before the data gather.
+	 * Therefore these headers must be copied into the WQE
+	 */
+	if (bf) {
+		u16 ihs = skb_headlen(skb);
+
+		if (skb_vlan_tag_present(skb))
+			ihs += VLAN_HLEN;
+
+		if (ihs <= sq->max_inline)
+			return skb_headlen(skb);
+	}
+	return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
+}
+
+static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
+					    unsigned int *skb_len,
+					    unsigned int len)
+{
+	*skb_len -= len;
+	*skb_data += len;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
+				     unsigned char **skb_data,
+				     unsigned int *skb_len)
+{
+	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+	int cpy1_sz = 2 * ETH_ALEN;
+	int cpy2_sz = ihs - cpy1_sz;
+
+	memcpy(vhdr, *skb_data, cpy1_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
+	vhdr->h_vlan_proto = skb->vlan_proto;
+	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
+	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
+}
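+
+/* Illustration: for ihs = 18 the copies above lay out, inside the WQE,
+ *
+ *	dst MAC (6) | src MAC (6) | vlan proto (2) | vlan TCI (2) | 6 more bytes
+ *
+ * i.e. cpy1_sz = 12 bytes before the tag and cpy2_sz = ihs - 12 = 6 after it;
+ * skb_data/skb_len advance past exactly the ihs bytes consumed from the skb,
+ * while ihs + VLAN_HLEN bytes end up inlined.
+ */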
+
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+	struct mlx5_wq_cyc       *wq   = &sq->wq;
+
+	u16 pi = sq->pc & wq->sz_m1;
+	struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(wq, pi);
+	struct mlx5e_tx_wqe_info *wi   = &sq->db.txq.wqe_info[pi];
+
+	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
+	struct mlx5_wqe_data_seg *dseg;
+
+	unsigned char *skb_data = skb->data;
+	unsigned int skb_len = skb->len;
+	u8  opcode = MLX5_OPCODE_SEND;
+	dma_addr_t dma_addr = 0;
+	unsigned int num_bytes;
+	bool bf = false;
+	u16 headlen;
+	u16 ds_cnt;
+	u16 ihs;
+	int i;
+
+	memset(wqe, 0, sizeof(*wqe));
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+		if (skb->encapsulation) {
+			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
+					  MLX5_ETH_WQE_L4_INNER_CSUM;
+			sq->stats.csum_partial_inner++;
+		} else {
+			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+		}
+	} else {
+		sq->stats.csum_none++;
+	}
+
+	if (sq->cc != sq->prev_cc) {
+		sq->prev_cc = sq->cc;
+		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
+	}
+
+	if (skb_is_gso(skb)) {
+		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
+		opcode       = MLX5_OPCODE_LSO;
+
+		if (skb->encapsulation) {
+			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+			sq->stats.tso_inner_packets++;
+			sq->stats.tso_inner_bytes += skb->len - ihs;
+		} else {
+			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+			sq->stats.tso_packets++;
+			sq->stats.tso_bytes += skb->len - ihs;
+		}
+
+		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
+	} else {
+		bf = sq->bf_budget &&
+		     !skb->xmit_more &&
+		     !skb_shinfo(skb)->nr_frags;
+		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
+		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
+	}
+
+	wi->num_bytes = num_bytes;
+
+	if (skb_vlan_tag_present(skb)) {
+		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
+				  &skb_len);
+		ihs += VLAN_HLEN;
+	} else {
+		memcpy(eseg->inline_hdr_start, skb_data, ihs);
+		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
+	}
+
+	eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
+	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+			       MLX5_SEND_WQE_DS);
+	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+	wi->num_dma = 0;
+
+	headlen = skb_len - skb->data_len;
+	if (headlen) {
+		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
+					  DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+			goto dma_unmap_wqe_err;
+
+		dseg->addr       = cpu_to_be64(dma_addr);
+		dseg->lkey       = sq->mkey_be;
+		dseg->byte_count = cpu_to_be32(headlen);
+
+		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
+		wi->num_dma++;
+
+		dseg++;
+	}
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		int fsz = skb_frag_size(frag);
+
+		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+					    DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+			goto dma_unmap_wqe_err;
+
+		dseg->addr       = cpu_to_be64(dma_addr);
+		dseg->lkey       = sq->mkey_be;
+		dseg->byte_count = cpu_to_be32(fsz);
+
+		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
+		wi->num_dma++;
+
+		dseg++;
+	}
+
+	ds_cnt += wi->num_dma;
+
+	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+
+	sq->db.txq.skb[pi] = skb;
+
+	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+	sq->pc += wi->num_wqebbs;
+
+	netdev_tx_sent_queue(sq->txq, wi->num_bytes);
+
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
+		netif_tx_stop_queue(sq->txq);
+		sq->stats.stopped++;
+	}
+
+	sq->stats.xmit_more += skb->xmit_more;
+	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
+		int bf_sz = 0;
+
+		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
+			bf_sz = wi->num_wqebbs << 3;
+
+		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
+	}
+
+	/* fill sq edge with nops to avoid wqe wrap around */
+	while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+		sq->db.txq.skb[pi] = NULL;
+		mlx5e_send_nop(sq, false);
+	}
+
+	if (bf)
+		sq->bf_budget--;
+
+	sq->stats.packets++;
+	sq->stats.bytes += num_bytes;
+	return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+	sq->stats.dropped++;
+	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
+
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
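+
+/* Worked example (illustrative, assuming the 32-byte ctrl+eth base WQE and
+ * 16-byte data segments): a linear non-GSO skb with ihs = 18 gives
+ *
+ *	ds_cnt = 32 / MLX5_SEND_WQE_DS			->  2
+ *	       + DIV_ROUND_UP(18 - 2, 16)		-> +1 (inline)
+ *	       + 1					-> +1 (gather)
+ *	       = 4
+ *
+ * so num_wqebbs = DIV_ROUND_UP(4, MLX5_SEND_WQEBB_NUM_DS) = 1 64-byte WQEBB.
+ */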
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mlx5e_priv *priv = netdev_priv(dev);
+	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];
+
+	return mlx5e_sq_xmit(sq, skb);
+}
+
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
+{
+	struct mlx5e_sq *sq;
+	u32 dma_fifo_cc;
+	u32 nbytes;
+	u16 npkts;
+	u16 sqcc;
+	int i;
+
+	sq = container_of(cq, struct mlx5e_sq, cq);
+
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+		return false;
+
+	npkts = 0;
+	nbytes = 0;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	/* avoid dirtying sq cache line every cqe */
+	dma_fifo_cc = sq->dma_fifo_cc;
+
+	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+		struct mlx5_cqe64 *cqe;
+		u16 wqe_counter;
+		bool last_wqe;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+		do {
+			struct mlx5e_tx_wqe_info *wi;
+			struct sk_buff *skb;
+			u16 ci;
+			int j;
+
+			last_wqe = (sqcc == wqe_counter);
+
+			ci = sqcc & sq->wq.sz_m1;
+			skb = sq->db.txq.skb[ci];
+			wi = &sq->db.txq.wqe_info[ci];
+
+			if (unlikely(!skb)) { /* nop */
+				sqcc++;
+				continue;
+			}
+
+			if (unlikely(skb_shinfo(skb)->tx_flags &
+				     SKBTX_HW_TSTAMP)) {
+				struct skb_shared_hwtstamps hwts = {};
+
+				mlx5e_fill_hwstamp(sq->tstamp,
+						   get_cqe_ts(cqe), &hwts);
+				skb_tstamp_tx(skb, &hwts);
+			}
+
+			for (j = 0; j < wi->num_dma; j++) {
+				struct mlx5e_sq_dma *dma =
+					mlx5e_dma_get(sq, dma_fifo_cc++);
+
+				mlx5e_tx_dma_unmap(sq->pdev, dma);
+			}
+
+			npkts++;
+			nbytes += wi->num_bytes;
+			sqcc += wi->num_wqebbs;
+			napi_consume_skb(skb, napi_budget);
+		} while (!last_wqe);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->dma_fifo_cc = dma_fifo_cc;
+	sq->cc = sqcc;
+
+	netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+	if (netif_tx_queue_stopped(sq->txq) &&
+	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+		netif_tx_wake_queue(sq->txq);
+		sq->stats.wake++;
+	}
+
+	return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+static void mlx5e_free_txq_sq_descs(struct mlx5e_sq *sq)
+{
+	struct mlx5e_tx_wqe_info *wi;
+	struct sk_buff *skb;
+	u16 ci;
+	int i;
+
+	while (sq->cc != sq->pc) {
+		ci = sq->cc & sq->wq.sz_m1;
+		skb = sq->db.txq.skb[ci];
+		wi = &sq->db.txq.wqe_info[ci];
+
+		if (!skb) { /* nop */
+			sq->cc++;
+			continue;
+		}
+
+		for (i = 0; i < wi->num_dma; i++) {
+			struct mlx5e_sq_dma *dma =
+				mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+			mlx5e_tx_dma_unmap(sq->pdev, dma);
+		}
+
+		dev_kfree_skb_any(skb);
+		sq->cc += wi->num_wqebbs;
+	}
+}
+
+static void mlx5e_free_xdp_sq_descs(struct mlx5e_sq *sq)
+{
+	struct mlx5e_sq_wqe_info *wi;
+	struct mlx5e_dma_info *di;
+	u16 ci;
+
+	while (sq->cc != sq->pc) {
+		ci = sq->cc & sq->wq.sz_m1;
+		di = &sq->db.xdp.di[ci];
+		wi = &sq->db.xdp.wqe_info[ci];
+
+		if (wi->opcode == MLX5_OPCODE_NOP) {
+			sq->cc++;
+			continue;
+		}
+
+		sq->cc += wi->num_wqebbs;
+
+		mlx5e_page_release(&sq->channel->rq, di, false);
+	}
+}
+
+void mlx5e_free_sq_descs(struct mlx5e_sq *sq)
+{
+	switch (sq->type) {
+	case MLX5E_SQ_TXQ:
+		mlx5e_free_txq_sq_descs(sq);
+		break;
+	case MLX5E_SQ_XDP:
+		mlx5e_free_xdp_sq_descs(sq);
+		break;
+	}
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/txrx.c b/drivers/net/ethernet/mellanox/mlx5/en/txrx.c
new file mode 100644
index 0000000..8bd4649
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/txrx.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en/en.h"
+
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+	struct mlx5_cqwq *wq = &cq->wq;
+	u32 ci = mlx5_cqwq_get_ci(wq);
+	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+	int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+	int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+	if (cqe_ownership_bit != sw_ownership_val)
+		return NULL;
+
+	/* ensure cqe content is read after cqe ownership bit */
+	rmb();
+
+	return cqe;
+}
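+
+/* Illustration: for an 8-entry CQ the consumer index cycles 0..7 while
+ * mlx5_cqwq_get_wrap_cnt() counts completed laps.  Hardware toggles the CQE
+ * owner bit on every lap, so "owner bit == wrap count & 1" means the entry
+ * was written on the current lap and is valid to consume; a mismatch means
+ * it still holds the previous (already consumed) lap's contents.
+ */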
+
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
+	struct mlx5_wq_cyc *wq;
+	struct mlx5_cqe64 *cqe;
+	u16 sqcc;
+
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+		return;
+
+	cqe = mlx5e_get_cqe(cq);
+	if (likely(!cqe))
+		return;
+
+	wq = &sq->wq;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	do {
+		u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+		struct mlx5e_sq_wqe_info *icowi = &sq->db.ico_wqe[ci];
+
+		mlx5_cqwq_pop(&cq->wq);
+		sqcc += icowi->num_wqebbs;
+
+		if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+			WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+				  cqe->op_own);
+			break;
+		}
+
+		switch (icowi->opcode) {
+		case MLX5_OPCODE_NOP:
+			break;
+		case MLX5_OPCODE_UMR:
+			mlx5e_post_rx_mpwqe(&sq->channel->rq);
+			break;
+		default:
+			WARN_ONCE(true,
+				  "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+				  icowi->opcode);
+		}
+
+	} while ((cqe = mlx5e_get_cqe(cq)));
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->cc = sqcc;
+}
+
+static inline bool mlx5e_poll_xdp_tx_cq(struct mlx5e_cq *cq)
+{
+	struct mlx5e_sq *sq;
+	u16 sqcc;
+	int i;
+
+	sq = container_of(cq, struct mlx5e_sq, cq);
+
+	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
+		return false;
+
+	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+	 * otherwise a cq overrun may occur
+	 */
+	sqcc = sq->cc;
+
+	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+		struct mlx5_cqe64 *cqe;
+		u16 wqe_counter;
+		bool last_wqe;
+
+		cqe = mlx5e_get_cqe(cq);
+		if (!cqe)
+			break;
+
+		mlx5_cqwq_pop(&cq->wq);
+
+		wqe_counter = be16_to_cpu(cqe->wqe_counter);
+
+		do {
+			struct mlx5e_sq_wqe_info *wi;
+			struct mlx5e_dma_info *di;
+			u16 ci;
+
+			last_wqe = (sqcc == wqe_counter);
+
+			ci = sqcc & sq->wq.sz_m1;
+			di = &sq->db.xdp.di[ci];
+			wi = &sq->db.xdp.wqe_info[ci];
+
+			if (unlikely(wi->opcode == MLX5_OPCODE_NOP)) {
+				sqcc++;
+				continue;
+			}
+
+			sqcc += wi->num_wqebbs;
+			/* Recycle RX page */
+			mlx5e_page_release(&sq->channel->rq, di, true);
+		} while (!last_wqe);
+	}
+
+	mlx5_cqwq_update_db_record(&cq->wq);
+
+	/* ensure cq space is freed before enabling more cqes */
+	wmb();
+
+	sq->cc = sqcc;
+	return (i == MLX5E_TX_CQ_POLL_BUDGET);
+}
+
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+					       napi);
+	bool busy = false;
+	int work_done;
+	int i;
+
+	clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+	for (i = 0; i < c->num_tc; i++)
+		busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
+
+	work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
+	busy |= work_done == budget;
+
+	if (c->xdp)
+		busy |= mlx5e_poll_xdp_tx_cq(&c->xdp_sq.cq);
+
+	mlx5e_poll_ico_cq(&c->icosq.cq);
+
+	busy |= mlx5e_post_rx_wqes(&c->rq);
+
+	if (busy)
+		return budget;
+
+	napi_complete_done(napi, work_done);
+
+	/* avoid losing completion event during/after polling cqs */
+	if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+		napi_schedule(napi);
+		return work_done;
+	}
+
+	for (i = 0; i < c->num_tc; i++)
+		mlx5e_cq_arm(&c->sq[i].cq);
+
+	if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
+		mlx5e_rx_am(&c->rq);
+
+	mlx5e_cq_arm(&c->rq.cq);
+	mlx5e_cq_arm(&c->icosq.cq);
+
+	return work_done;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+	cq->event_ctr++;
+	set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+	napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+	struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+	struct mlx5e_channel *c = cq->channel;
+	struct mlx5e_priv *priv = c->priv;
+	struct net_device *netdev = priv->netdev;
+
+	netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+		   __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/en/vxlan.c
new file mode 100644
index 0000000..575105a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/vxlan.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "mlx5.h"
+#include "en/vxlan.h"
+
+void mlx5e_vxlan_init(struct mlx5e_priv *priv)
+{
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+
+	spin_lock_init(&vxlan_db->lock);
+	INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
+}
+
+static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
+
+	MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
+		 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
+	MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
+{
+	u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
+
+	MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
+		 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
+	MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
+{
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	struct mlx5e_vxlan *vxlan;
+
+	spin_lock(&vxlan_db->lock);
+	vxlan = radix_tree_lookup(&vxlan_db->tree, port);
+	spin_unlock(&vxlan_db->lock);
+
+	return vxlan;
+}
+
+static void mlx5e_vxlan_add_port(struct work_struct *work)
+{
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	u16 port = vxlan_work->port;
+	struct mlx5e_vxlan *vxlan;
+	int err;
+
+	if (mlx5e_vxlan_lookup_port(priv, port))
+		goto free_work;
+
+	if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+		goto free_work;
+
+	vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
+	if (!vxlan)
+		goto err_delete_port;
+
+	vxlan->udp_port = port;
+
+	spin_lock_irq(&vxlan_db->lock);
+	err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
+	spin_unlock_irq(&vxlan_db->lock);
+	if (err)
+		goto err_free;
+
+	goto free_work;
+
+err_free:
+	kfree(vxlan);
+err_delete_port:
+	mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
+free_work:
+	kfree(vxlan_work);
+}
+
+static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
+{
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	struct mlx5e_vxlan *vxlan;
+
+	spin_lock_irq(&vxlan_db->lock);
+	vxlan = radix_tree_delete(&vxlan_db->tree, port);
+	spin_unlock_irq(&vxlan_db->lock);
+
+	if (!vxlan)
+		return;
+
+	mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
+
+	kfree(vxlan);
+}
+
+static void mlx5e_vxlan_del_port(struct work_struct *work)
+{
+	struct mlx5e_vxlan_work *vxlan_work =
+		container_of(work, struct mlx5e_vxlan_work, work);
+	struct mlx5e_priv *priv = vxlan_work->priv;
+	u16 port = vxlan_work->port;
+
+	__mlx5e_vxlan_core_del_port(priv, port);
+
+	kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add)
+{
+	struct mlx5e_vxlan_work *vxlan_work;
+
+	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+	if (!vxlan_work)
+		return;
+
+	if (add)
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+	else
+		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+	vxlan_work->priv = priv;
+	vxlan_work->port = port;
+	vxlan_work->sa_family = sa_family;
+	queue_work(priv->wq, &vxlan_work->work);
+}
+
+void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
+{
+	struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+	struct mlx5e_vxlan *vxlan;
+	unsigned int port = 0;
+
+	spin_lock_irq(&vxlan_db->lock);
+	while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
+		port = vxlan->udp_port;
+		spin_unlock_irq(&vxlan_db->lock);
+		__mlx5e_vxlan_core_del_port(priv, (u16)port);
+		spin_lock_irq(&vxlan_db->lock);
+	}
+	spin_unlock_irq(&vxlan_db->lock);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/en/vxlan.h
new file mode 100644
index 0000000..579827f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/vxlan.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_VXLAN_H__
+#define __MLX5_VXLAN_H__
+
+#include "lib/driver.h"
+#include "en/en.h"
+
+struct mlx5e_vxlan {
+	u16 udp_port;
+};
+
+struct mlx5e_vxlan_work {
+	struct work_struct	work;
+	struct mlx5e_priv	*priv;
+	sa_family_t		sa_family;
+	u16			port;
+};
+
+static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
+{
+	return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+		mlx5_core_is_pf(mdev));
+}
+
+void mlx5e_vxlan_init(struct mlx5e_priv *priv);
+void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+			    u16 port, int add);
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
+
+#endif /* __MLX5_VXLAN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/wq.c b/drivers/net/ethernet/mellanox/mlx5/en/wq.c
new file mode 100644
index 0000000..4efc67b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/wq.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "en/wq.h"
+#include "mlx5.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+	return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+	return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+	return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+	return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+	return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+	return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		       void *wqc, struct mlx5_wq_cyc *wq,
+		       struct mlx5_wq_ctrl *wq_ctrl)
+{
+	int err;
+
+	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		goto err_db_free;
+	}
+
+	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->db  = wq_ctrl->db.db;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		     void *cqc, struct mlx5_cqwq *wq,
+		     struct mlx5_frag_wq_ctrl *wq_ctrl)
+{
+	int err;
+
+	wq->log_stride	= 6 + MLX5_GET(cqc, cqc, cqe_sz);
+	wq->log_sz	= MLX5_GET(cqc, cqc, log_cq_size);
+	wq->sz_m1	= (1 << wq->log_sz) - 1;
+	wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
+	wq->frag_sz_m1	= (1 << wq->log_frag_strides) - 1;
+
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_frag_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
+				       &wq_ctrl->frag_buf,
+				       param->buf_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_frag_buf_alloc_node() failed, %d\n",
+			       err);
+		goto err_db_free;
+	}
+
+	wq->frag_buf = wq_ctrl->frag_buf;
+	wq->db  = wq_ctrl->db.db;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		      void *wqc, struct mlx5_wq_ll *wq,
+		      struct mlx5_wq_ctrl *wq_ctrl)
+{
+	struct mlx5_wqe_srq_next_seg *next_seg;
+	int err;
+	int i;
+
+	wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+	wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
+		return err;
+	}
+
+	err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+				  &wq_ctrl->buf, param->buf_numa_node);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
+		goto err_db_free;
+	}
+
+	wq->buf = wq_ctrl->buf.direct.buf;
+	wq->db  = wq_ctrl->db.db;
+
+	for (i = 0; i < wq->sz_m1; i++) {
+		next_seg = mlx5_wq_ll_get_wqe(wq, i);
+		next_seg->next_wqe_index = cpu_to_be16(i + 1);
+	}
+	next_seg = mlx5_wq_ll_get_wqe(wq, i);
+	wq->tail_next = &next_seg->next_wqe_index;
+
+	wq_ctrl->mdev = mdev;
+
+	return 0;
+
+err_db_free:
+	mlx5_db_free(mdev, &wq_ctrl->db);
+
+	return err;
+}
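+
+/* Illustration: for log_wq_sz = 2 the loop above chains the four WQEs as
+ *
+ *	wqe[0].next_wqe_index = 1
+ *	wqe[1].next_wqe_index = 2
+ *	wqe[2].next_wqe_index = 3
+ *
+ * and leaves wq->tail_next pointing at wqe[3].next_wqe_index, which
+ * mlx5_wq_ll_pop() rewrites as entries are recycled onto the free list.
+ */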
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+	mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
+
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl)
+{
+	mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->frag_buf);
+	mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/en/wq.h b/drivers/net/ethernet/mellanox/mlx5/en/wq.h
new file mode 100644
index 0000000..7e0e468
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/en/wq.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include "lib/mlx5_ifc.h"
+
+struct mlx5_wq_param {
+	int		linear;
+	int		buf_numa_node;
+	int		db_numa_node;
+};
+
+struct mlx5_wq_ctrl {
+	struct mlx5_core_dev	*mdev;
+	struct mlx5_buf		buf;
+	struct mlx5_db		db;
+};
+
+struct mlx5_frag_wq_ctrl {
+	struct mlx5_core_dev	*mdev;
+	struct mlx5_frag_buf	frag_buf;
+	struct mlx5_db		db;
+};
+
+struct mlx5_wq_cyc {
+	void			*buf;
+	__be32			*db;
+	u16			sz_m1;
+	u8			log_stride;
+};
+
+struct mlx5_cqwq {
+	struct mlx5_frag_buf	frag_buf;
+	__be32			*db;
+	u32			sz_m1;
+	u32			frag_sz_m1;
+	u32			cc; /* consumer counter */
+	u8			log_sz;
+	u8			log_stride;
+	u8			log_frag_strides;
+};
+
+struct mlx5_wq_ll {
+	void			*buf;
+	__be32			*db;
+	__be16			*tail_next;
+	u16			sz_m1;
+	u16			head;
+	u16			wqe_ctr;
+	u16			cur_sz;
+	u8			log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		       void *wqc, struct mlx5_wq_cyc *wq,
+		       struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		     void *cqc, struct mlx5_cqwq *wq,
+		     struct mlx5_frag_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+		      void *wqc, struct mlx5_wq_ll *wq,
+		      struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+void mlx5_cqwq_destroy(struct mlx5_frag_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+	return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+	return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+	int equal   = (cc1 == cc2);
+	int smaller = 0x8000 & (cc1 - cc2);
+
+	return !equal && !smaller;
+}
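+
+/* Illustration: this is serial-number arithmetic on 16-bit counters.
+ * 0x8000 & (cc1 - cc2) tests the sign bit of the wrapped difference, so
+ * e.g. cc1 = 0x0001, cc2 = 0xfff0 gives cc1 - cc2 = 0x0011 (sign clear)
+ * and compares as "bigger", even though cc1 < cc2 numerically.
+ */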
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+	return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+	unsigned int frag = (ix >> wq->log_frag_strides);
+
+	return wq->frag_buf.frags[frag].buf +
+		((wq->frag_sz_m1 & ix) << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+	return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+	wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+	*wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+	return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+	return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+	return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+	wq->head = head_next;
+	wq->wqe_ctr++;
+	wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+				  __be16 *next_tail_next)
+{
+	*wq->tail_next = ix;
+	wq->tail_next = next_tail_next;
+	wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+	*wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/eq.c b/drivers/net/ethernet/mellanox/mlx5/eq.c
new file mode 100644
index 0000000..2b5cf44
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/eq.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+#ifdef CONFIG_MLX5_CORE_EN
+#include <sriov/eswitch.h>
+#endif
+
+enum {
+	MLX5_EQE_SIZE		= sizeof(struct mlx5_eqe),
+	MLX5_EQE_OWNER_INIT_VAL	= 0x1,
+};
+
+enum {
+	MLX5_EQ_STATE_ARMED		= 0x9,
+	MLX5_EQ_STATE_FIRED		= 0xa,
+	MLX5_EQ_STATE_ALWAYS_ARMED	= 0xb,
+};
+
+enum {
+	MLX5_NUM_SPARE_EQE	= 0x80,
+	MLX5_NUM_ASYNC_EQE	= 0x100,
+	MLX5_NUM_CMD_EQE	= 32,
+	MLX5_NUM_PF_DRAIN	= 64,
+};
+
+enum {
+	MLX5_EQ_DOORBEL_OFFSET	= 0x40,
+};
+
+#define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG)	    | \
+			       (1ull << MLX5_EVENT_TYPE_COMM_EST)	    | \
+			       (1ull << MLX5_EVENT_TYPE_SQ_DRAINED)	    | \
+			       (1ull << MLX5_EVENT_TYPE_CQ_ERROR)	    | \
+			       (1ull << MLX5_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
+			       (1ull << MLX5_EVENT_TYPE_PATH_MIG_FAILED)    | \
+			       (1ull << MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
+			       (1ull << MLX5_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
+			       (1ull << MLX5_EVENT_TYPE_PORT_CHANGE)	    | \
+			       (1ull << MLX5_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+			       (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE)	    | \
+			       (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT))
+
+struct map_eq_in {
+	u64	mask;
+	u32	reserved;
+	u32	unmap_eqn;
+};
+
+struct cre_des_eq {
+	u8	reserved[15];
+	u8	eqn;
+};
+
+static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn)
+{
+	u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_eq_in)]   = {0};
+
+	MLX5_SET(destroy_eq_in, in, opcode, MLX5_CMD_OP_DESTROY_EQ);
+	MLX5_SET(destroy_eq_in, in, eq_number, eqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry)
+{
+	return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE);
+}
+
+static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq)
+{
+	struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
+
+	return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
+}
+
+static const char *eqe_type_str(u8 type)
+{
+	switch (type) {
+	case MLX5_EVENT_TYPE_COMP:
+		return "MLX5_EVENT_TYPE_COMP";
+	case MLX5_EVENT_TYPE_PATH_MIG:
+		return "MLX5_EVENT_TYPE_PATH_MIG";
+	case MLX5_EVENT_TYPE_COMM_EST:
+		return "MLX5_EVENT_TYPE_COMM_EST";
+	case MLX5_EVENT_TYPE_SQ_DRAINED:
+		return "MLX5_EVENT_TYPE_SQ_DRAINED";
+	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+		return "MLX5_EVENT_TYPE_SRQ_LAST_WQE";
+	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+		return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT";
+	case MLX5_EVENT_TYPE_CQ_ERROR:
+		return "MLX5_EVENT_TYPE_CQ_ERROR";
+	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+		return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR";
+	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+		return "MLX5_EVENT_TYPE_PATH_MIG_FAILED";
+	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+		return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR";
+	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+		return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR";
+	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+		return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR";
+	case MLX5_EVENT_TYPE_INTERNAL_ERROR:
+		return "MLX5_EVENT_TYPE_INTERNAL_ERROR";
+	case MLX5_EVENT_TYPE_PORT_CHANGE:
+		return "MLX5_EVENT_TYPE_PORT_CHANGE";
+	case MLX5_EVENT_TYPE_GPIO_EVENT:
+		return "MLX5_EVENT_TYPE_GPIO_EVENT";
+	case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+		return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT";
+	case MLX5_EVENT_TYPE_REMOTE_CONFIG:
+		return "MLX5_EVENT_TYPE_REMOTE_CONFIG";
+	case MLX5_EVENT_TYPE_DB_BF_CONGESTION:
+		return "MLX5_EVENT_TYPE_DB_BF_CONGESTION";
+	case MLX5_EVENT_TYPE_STALL_EVENT:
+		return "MLX5_EVENT_TYPE_STALL_EVENT";
+	case MLX5_EVENT_TYPE_CMD:
+		return "MLX5_EVENT_TYPE_CMD";
+	case MLX5_EVENT_TYPE_PAGE_REQUEST:
+		return "MLX5_EVENT_TYPE_PAGE_REQUEST";
+	case MLX5_EVENT_TYPE_PAGE_FAULT:
+		return "MLX5_EVENT_TYPE_PAGE_FAULT";
+	default:
+		return "Unrecognized event";
+	}
+}
+
+static enum mlx5_dev_event port_subtype_event(u8 subtype)
+{
+	switch (subtype) {
+	case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+		return MLX5_DEV_EVENT_PORT_DOWN;
+	case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+		return MLX5_DEV_EVENT_PORT_UP;
+	case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+		return MLX5_DEV_EVENT_PORT_INITIALIZED;
+	case MLX5_PORT_CHANGE_SUBTYPE_LID:
+		return MLX5_DEV_EVENT_LID_CHANGE;
+	case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+		return MLX5_DEV_EVENT_PKEY_CHANGE;
+	case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+		return MLX5_DEV_EVENT_GUID_CHANGE;
+	case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+		return MLX5_DEV_EVENT_CLIENT_REREG;
+	}
+	return -1;
+}
+
+static void eq_update_ci(struct mlx5_eq *eq, int arm)
+{
+	__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
+	u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
+	__raw_writel((__force u32) cpu_to_be32(val), addr);
+	/* We still want ordering, just not swabbing, so add a barrier */
+	mb();
+}
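+
+/* Illustration (assumed values): the doorbell word packs the 24-bit
+ * consumer index under the EQ number, e.g. eqn = 5 and cons_index = 0x102
+ * yield val = 0x05000102.  As used here, the armed variant writes at
+ * doorbell offset 0 (update CI and re-arm); the non-armed variant writes
+ * two dwords further (8 bytes in) and only updates CI.
+ */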
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+static void eqe_pf_action(struct work_struct *work)
+{
+	struct mlx5_pagefault *pfault = container_of(work,
+						     struct mlx5_pagefault,
+						     work);
+	struct mlx5_eq *eq = pfault->eq;
+
+	mlx5_core_page_fault(eq->dev, pfault);
+	mempool_free(pfault, eq->pf_ctx.pool);
+}
+
+static void eq_pf_process(struct mlx5_eq *eq)
+{
+	struct mlx5_core_dev *dev = eq->dev;
+	struct mlx5_eqe_page_fault *pf_eqe;
+	struct mlx5_pagefault *pfault;
+	struct mlx5_eqe *eqe;
+	int set_ci = 0;
+
+	while ((eqe = next_eqe_sw(eq))) {
+		pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC);
+		if (!pfault) {
+			schedule_work(&eq->pf_ctx.work);
+			break;
+		}
+
+		dma_rmb();
+		pf_eqe = &eqe->data.page_fault;
+		pfault->event_subtype = eqe->sub_type;
+		pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed);
+
+		mlx5_core_dbg(dev,
+			      "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n",
+			      eqe->sub_type, pfault->bytes_committed);
+
+		switch (eqe->sub_type) {
+		case MLX5_PFAULT_SUBTYPE_RDMA:
+			/* RDMA based event */
+			pfault->type =
+				be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24;
+			pfault->token =
+				be32_to_cpu(pf_eqe->rdma.pftype_token) &
+				MLX5_24BIT_MASK;
+			pfault->rdma.r_key =
+				be32_to_cpu(pf_eqe->rdma.r_key);
+			pfault->rdma.packet_size =
+				be16_to_cpu(pf_eqe->rdma.packet_length);
+			pfault->rdma.rdma_op_len =
+				be32_to_cpu(pf_eqe->rdma.rdma_op_len);
+			pfault->rdma.rdma_va =
+				be64_to_cpu(pf_eqe->rdma.rdma_va);
+			mlx5_core_dbg(dev,
+				      "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n",
+				      pfault->type, pfault->token,
+				      pfault->rdma.r_key);
+			mlx5_core_dbg(dev,
+				      "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n",
+				      pfault->rdma.rdma_op_len,
+				      pfault->rdma.rdma_va);
+			break;
+
+		case MLX5_PFAULT_SUBTYPE_WQE:
+			/* WQE based event */
+			pfault->type =
+				be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24;
+			pfault->token =
+				be32_to_cpu(pf_eqe->wqe.token);
+			pfault->wqe.wq_num =
+				be32_to_cpu(pf_eqe->wqe.pftype_wq) &
+				MLX5_24BIT_MASK;
+			pfault->wqe.wqe_index =
+				be16_to_cpu(pf_eqe->wqe.wqe_index);
+			pfault->wqe.packet_size =
+				be16_to_cpu(pf_eqe->wqe.packet_length);
+			mlx5_core_dbg(dev,
+				      "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n",
+				      pfault->type, pfault->token,
+				      pfault->wqe.wq_num,
+				      pfault->wqe.wqe_index);
+			break;
+
+		default:
+			mlx5_core_warn(dev,
+				       "Unsupported page fault event sub-type: 0x%02hhx\n",
+				       eqe->sub_type);
+			/* Unsupported page faults should still be
+			 * resolved by the page fault handler
+			 */
+		}
+
+		pfault->eq = eq;
+		INIT_WORK(&pfault->work, eqe_pf_action);
+		queue_work(eq->pf_ctx.wq, &pfault->work);
+
+		++eq->cons_index;
+		++set_ci;
+
+		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
+			eq_update_ci(eq, 0);
+			set_ci = 0;
+		}
+	}
+
+	eq_update_ci(eq, 1);
+}
+
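+/* Page fault EQ interrupt handler. If the lock is contended, defer
+ * processing to eq_pf_action() on the workqueue instead of spinning
+ * in hard IRQ context.
+ */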
+static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr)
+{
+	struct mlx5_eq *eq = eq_ptr;
+	unsigned long flags;
+
+	if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) {
+		eq_pf_process(eq);
+		spin_unlock_irqrestore(&eq->pf_ctx.lock, flags);
+	} else {
+		schedule_work(&eq->pf_ctx.work);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/* mempool_refill() was proposed but unfortunately wasn't accepted
+ * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html
+ * Cheap workaround.
+ */
+static void mempool_refill(mempool_t *pool)
+{
+	while (pool->curr_nr < pool->min_nr)
+		mempool_free(mempool_alloc(pool, GFP_KERNEL), pool);
+}
+
+static void eq_pf_action(struct work_struct *work)
+{
+	struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work);
+
+	mempool_refill(eq->pf_ctx.pool);
+
+	spin_lock_irq(&eq->pf_ctx.lock);
+	eq_pf_process(eq);
+	spin_unlock_irq(&eq->pf_ctx.lock);
+}
+
+static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name)
+{
+	spin_lock_init(&pf_ctx->lock);
+	INIT_WORK(&pf_ctx->work, eq_pf_action);
+
+	pf_ctx->wq = alloc_ordered_workqueue(name,
+					     WQ_MEM_RECLAIM);
+	if (!pf_ctx->wq)
+		return -ENOMEM;
+
+	pf_ctx->pool = mempool_create_kmalloc_pool
+		(MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault));
+	if (!pf_ctx->pool)
+		goto err_wq;
+
+	return 0;
+err_wq:
+	destroy_workqueue(pf_ctx->wq);
+	return -ENOMEM;
+}
+
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+				u32 wq_num, u8 type, int error)
+{
+	u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)]   = {0};
+
+	MLX5_SET(page_fault_resume_in, in, opcode,
+		 MLX5_CMD_OP_PAGE_FAULT_RESUME);
+	MLX5_SET(page_fault_resume_in, in, error, !!error);
+	MLX5_SET(page_fault_resume_in, in, page_fault_type, type);
+	MLX5_SET(page_fault_resume_in, in, wq_number, wq_num);
+	MLX5_SET(page_fault_resume_in, in, token, token);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
+#endif
+
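+/* Generic EQ interrupt handler: walk the new EQEs and demultiplex
+ * completion, QP/SRQ, command, port, page request and module events
+ * to their respective handlers.
+ */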
+static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
+{
+	struct mlx5_eq *eq = eq_ptr;
+	struct mlx5_core_dev *dev = eq->dev;
+	struct mlx5_eqe *eqe;
+	int set_ci = 0;
+	u32 cqn = -1;
+	u32 rsn;
+	u8 port;
+
+	while ((eqe = next_eqe_sw(eq))) {
+		/*
+		 * Make sure we read EQ entry contents after we've
+		 * checked the ownership bit.
+		 */
+		dma_rmb();
+
+		mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+			      eq->eqn, eqe_type_str(eqe->type));
+		switch (eqe->type) {
+		case MLX5_EVENT_TYPE_COMP:
+			cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
+			mlx5_cq_completion(dev, cqn);
+			break;
+
+		case MLX5_EVENT_TYPE_PATH_MIG:
+		case MLX5_EVENT_TYPE_COMM_EST:
+		case MLX5_EVENT_TYPE_SQ_DRAINED:
+		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+			rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN);
+			mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n",
+				      eqe_type_str(eqe->type), eqe->type, rsn);
+			mlx5_rsc_event(dev, rsn, eqe->type);
+			break;
+
+		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
+		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
+				      eqe_type_str(eqe->type), eqe->type, rsn);
+			mlx5_srq_event(dev, rsn, eqe->type);
+			break;
+
+		case MLX5_EVENT_TYPE_CMD:
+			mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector));
+			break;
+
+		case MLX5_EVENT_TYPE_PORT_CHANGE:
+			port = (eqe->data.port.port >> 4) & 0xf;
+			switch (eqe->sub_type) {
+			case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+			case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+			case MLX5_PORT_CHANGE_SUBTYPE_LID:
+			case MLX5_PORT_CHANGE_SUBTYPE_PKEY:
+			case MLX5_PORT_CHANGE_SUBTYPE_GUID:
+			case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
+			case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
+				if (dev->event)
+					dev->event(dev, port_subtype_event(eqe->sub_type),
+						   (unsigned long)port);
+				break;
+			default:
+				mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
+					       port, eqe->sub_type);
+			}
+			break;
+		case MLX5_EVENT_TYPE_CQ_ERROR:
+			cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
+			mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
+				       cqn, eqe->data.cq_err.syndrome);
+			mlx5_cq_event(dev, cqn, eqe->type);
+			break;
+
+		case MLX5_EVENT_TYPE_PAGE_REQUEST:
+			{
+				u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
+				s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
+
+				mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+					      func_id, npages);
+				mlx5_core_req_pages_handler(dev, func_id, npages);
+			}
+			break;
+
+#ifdef CONFIG_MLX5_CORE_EN
+		case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE:
+			mlx5_eswitch_vport_event(dev->priv.eswitch, eqe);
+			break;
+#endif
+
+		case MLX5_EVENT_TYPE_PORT_MODULE_EVENT:
+			mlx5_port_module_event(dev, eqe);
+			break;
+
+		default:
+			mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+				       eqe->type, eq->eqn);
+			break;
+		}
+
+		++eq->cons_index;
+		++set_ci;
+
+		/* The HCA will think the queue has overflowed if we
+		 * don't tell it we've been processing events.  We
+		 * create our EQs with MLX5_NUM_SPARE_EQE extra
+		 * entries, so we must update our consumer index at
+		 * least that often.
+		 */
+		if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) {
+			eq_update_ci(eq, 0);
+			set_ci = 0;
+		}
+	}
+
+	eq_update_ci(eq, 1);
+
+	if (cqn != -1)
+		tasklet_schedule(&eq->tasklet_ctx.task);
+
+	return IRQ_HANDLED;
+}
+
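+/* Set the ownership bit of every EQE to its initial value so the ring
+ * reads as empty until the HCA writes the first events.
+ */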
+static void init_eq_buf(struct mlx5_eq *eq)
+{
+	struct mlx5_eqe *eqe;
+	int i;
+
+	for (i = 0; i < eq->nent; i++) {
+		eqe = get_eqe(eq, i);
+		eqe->owner = MLX5_EQE_OWNER_INIT_VAL;
+	}
+}
+
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+		       int nent, u64 mask, const char *name,
+		       enum mlx5_eq_type type)
+{
+	u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
+	struct mlx5_priv *priv = &dev->priv;
+	irq_handler_t handler;
+	__be64 *pas;
+	void *eqc;
+	int inlen;
+	u32 *in;
+	int err;
+
+	eq->type = type;
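+	/* Add spare EQEs on top of the requested size so the consumer
+	 * index may lag by up to MLX5_NUM_SPARE_EQE entries without the
+	 * HCA seeing an overflow (see the comment in mlx5_eq_int()).
+	 */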
+	eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
+	eq->cons_index = 0;
+	err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
+	if (err)
+		return err;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (type == MLX5_EQ_TYPE_PF)
+		handler = mlx5_eq_pf_int;
+	else
+#endif
+		handler = mlx5_eq_int;
+
+	init_eq_buf(eq);
+
+	inlen = MLX5_ST_SZ_BYTES(create_eq_in) +
+		MLX5_FLD_SZ_BYTES(create_eq_in, pas[0]) * eq->buf.npages;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		err = -ENOMEM;
+		goto err_buf;
+	}
+
+	pas = (__be64 *)MLX5_ADDR_OF(create_eq_in, in, pas);
+	mlx5_fill_page_array(&eq->buf, pas);
+
+	MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ);
+	MLX5_SET64(create_eq_in, in, event_bitmask, mask);
+
+	eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry);
+	MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));
+	MLX5_SET(eqc, eqc, uar_page, priv->uar->index);
+	MLX5_SET(eqc, eqc, intr, vecidx);
+	MLX5_SET(eqc, eqc, log_page_size,
+		 eq->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err)
+		goto err_in;
+
+	snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
+		 name, pci_name(dev->pdev));
+
+	eq->eqn = MLX5_GET(create_eq_out, out, eq_number);
+	eq->irqn = priv->msix_arr[vecidx].vector;
+	eq->dev = dev;
+	eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET;
+	err = request_irq(eq->irqn, handler, 0,
+			  priv->irq_info[vecidx].name, eq);
+	if (err)
+		goto err_eq;
+
+	err = mlx5_debug_eq_add(dev, eq);
+	if (err)
+		goto err_irq;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (type == MLX5_EQ_TYPE_PF) {
+		err = init_pf_ctx(&eq->pf_ctx, name);
+		if (err)
+			goto err_irq;
+	} else
+#endif
+	{
+		INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+		INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+		spin_lock_init(&eq->tasklet_ctx.lock);
+		tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+			     (unsigned long)&eq->tasklet_ctx);
+	}
+
+	/* EQs are created in ARMED state
+	 */
+	eq_update_ci(eq, 1);
+
+	kvfree(in);
+	return 0;
+
+err_irq:
+	free_irq(priv->msix_arr[vecidx].vector, eq);
+
+err_eq:
+	mlx5_cmd_destroy_eq(dev, eq->eqn);
+
+err_in:
+	kvfree(in);
+
+err_buf:
+	mlx5_buf_free(dev, &eq->buf);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
+
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
+{
+	int err;
+
+	mlx5_debug_eq_remove(dev, eq);
+	free_irq(eq->irqn, eq);
+	err = mlx5_cmd_destroy_eq(dev, eq->eqn);
+	if (err)
+		mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
+			       eq->eqn);
+	synchronize_irq(eq->irqn);
+
+	if (eq->type == MLX5_EQ_TYPE_COMP) {
+		tasklet_disable(&eq->tasklet_ctx.task);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	} else if (eq->type == MLX5_EQ_TYPE_PF) {
+		cancel_work_sync(&eq->pf_ctx.work);
+		destroy_workqueue(eq->pf_ctx.wq);
+		mempool_destroy(eq->pf_ctx.pool);
+#endif
+	}
+	mlx5_buf_free(dev, &eq->buf);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx)
+{
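+	/* vecidx is currently unused; every caller gets the async EQ
+	 * vector, the parameter presumably being kept for API symmetry.
+	 */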
+	return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector;
+}
+
+int mlx5_eq_init(struct mlx5_core_dev *dev)
+{
+	spin_lock_init(&dev->priv.eq_table.lock);
+
+	return mlx5_eq_debugfs_init(dev);
+}
+
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev)
+{
+	mlx5_eq_debugfs_cleanup(dev);
+}
+
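+/* Create the command, async and pages EQs (plus the page fault EQ when
+ * on-demand paging is supported) and switch the command interface from
+ * polling to event-driven completions.
+ */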
+int mlx5_start_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	u64 async_event_mask = MLX5_ASYNC_EVENT_MASK;
+	int err;
+
+	if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH &&
+	    MLX5_CAP_GEN(dev, vport_group_manager) &&
+	    mlx5_core_is_pf(dev))
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE);
+
+	if (MLX5_CAP_GEN(dev, port_module_event))
+		async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT);
+	else
+		mlx5_core_dbg(dev, "port_module_event is not set\n");
+
+	err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
+				 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
+				 "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
+	if (err) {
+		mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err);
+		return err;
+	}
+
+	mlx5_cmd_use_events(dev);
+
+	err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC,
+				 MLX5_NUM_ASYNC_EQE, async_event_mask,
+				 "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC);
+	if (err) {
+		mlx5_core_warn(dev, "failed to create async EQ %d\n", err);
+		goto err1;
+	}
+
+	err = mlx5_create_map_eq(dev, &table->pages_eq,
+				 MLX5_EQ_VEC_PAGES,
+				 /* TODO: sriov max_vf + */ 1,
+				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
+				 MLX5_EQ_TYPE_ASYNC);
+	if (err) {
+		mlx5_core_warn(dev, "failed to create pages EQ %d\n", err);
+		goto err2;
+	}
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (MLX5_CAP_GEN(dev, pg)) {
+		err = mlx5_create_map_eq(dev, &table->pfault_eq,
+					 MLX5_EQ_VEC_PFAULT,
+					 MLX5_NUM_ASYNC_EQE,
+					 1 << MLX5_EVENT_TYPE_PAGE_FAULT,
+					 "mlx5_page_fault_eq",
+					 MLX5_EQ_TYPE_PF);
+		if (err) {
+			mlx5_core_warn(dev, "failed to create page fault EQ %d\n",
+				       err);
+			goto err3;
+		}
+	}
+
+	return err;
+err3:
+	mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+#else
+	return err;
+#endif
+
+err2:
+	mlx5_destroy_unmap_eq(dev, &table->async_eq);
+
+err1:
+	mlx5_cmd_use_polling(dev);
+	mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+	return err;
+}
+
+int mlx5_stop_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	int err;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	if (MLX5_CAP_GEN(dev, pg)) {
+		err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq);
+		if (err)
+			return err;
+	}
+#endif
+
+	err = mlx5_destroy_unmap_eq(dev, &table->pages_eq);
+	if (err)
+		return err;
+
+	mlx5_destroy_unmap_eq(dev, &table->async_eq);
+	mlx5_cmd_use_polling(dev);
+
+	err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq);
+	if (err)
+		mlx5_cmd_use_events(dev);
+
+	return err;
+}
+
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+		       u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0};
+
+	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
+	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
diff --git a/drivers/net/ethernet/mellanox/mlx5/fs/cmd.c b/drivers/net/ethernet/mellanox/mlx5/fs/cmd.c
new file mode 100644
index 0000000..ce1ff6f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/fs/cmd.c
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "lib/device.h"
+#include "lib/mlx5_ifc.h"
+#include "fs/core.h"
+#include "fs/cmd.h"
+#include "mlx5.h"
+#include "sriov/eswitch.h"
+
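+/* Make ft the root flow table of its table type, so packet lookup
+ * starts there.
+ */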
+int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
+			    struct mlx5_flow_table *ft)
+{
+	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+
+	MLX5_SET(set_flow_table_root_in, in, opcode,
+		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
+	MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
+	MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+		MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+	}
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
+			       enum fs_flow_table_type type, unsigned int level,
+			       unsigned int log_size, struct mlx5_flow_table
+			       *next_ft, unsigned int *table_id, u32 flags)
+{
+	int en_encap_decap = !!(flags & MLX5_FLOW_TABLE_TUNNEL_EN);
+	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)]   = {0};
+	int err;
+
+	MLX5_SET(create_flow_table_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+	MLX5_SET(create_flow_table_in, in, table_type, type);
+	MLX5_SET(create_flow_table_in, in, level, level);
+	MLX5_SET(create_flow_table_in, in, log_size, log_size);
+	if (vport) {
+		MLX5_SET(create_flow_table_in, in, vport_number, vport);
+		MLX5_SET(create_flow_table_in, in, other_vport, 1);
+	}
+
+	MLX5_SET(create_flow_table_in, in, decap_en, en_encap_decap);
+	MLX5_SET(create_flow_table_in, in, encap_en, en_encap_decap);
+
+	switch (op_mod) {
+	case FS_FT_OP_MOD_NORMAL:
+		if (next_ft) {
+			MLX5_SET(create_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(create_flow_table_in, in, table_miss_id, next_ft->id);
+		}
+		break;
+
+	case FS_FT_OP_MOD_LAG_DEMUX:
+		MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+		if (next_ft)
+			MLX5_SET(create_flow_table_in, in, lag_master_next_table_id,
+				 next_ft->id);
+		break;
+	}
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*table_id = MLX5_GET(create_flow_table_out, out,
+				     table_id);
+	return err;
+}
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+				struct mlx5_flow_table *ft)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
+
+	MLX5_SET(destroy_flow_table_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+	MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
+	MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+		MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+	}
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
+			       struct mlx5_flow_table *ft,
+			       struct mlx5_flow_table *next_ft)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_flow_table_out)] = {0};
+
+	MLX5_SET(modify_flow_table_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+	MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
+	MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+
+	if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in,
+				 lag_master_next_table_id, 0);
+		}
+	} else {
+		if (ft->vport) {
+			MLX5_SET(modify_flow_table_in, in, vport_number,
+				 ft->vport);
+			MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+		}
+		MLX5_SET(modify_flow_table_in, in, modify_field_select,
+			 MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+		if (next_ft) {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 1);
+			MLX5_SET(modify_flow_table_in, in, table_miss_id,
+				 next_ft->id);
+		} else {
+			MLX5_SET(modify_flow_table_in, in, table_miss_mode, 0);
+		}
+	}
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+			       struct mlx5_flow_table *ft,
+			       u32 *in,
+			       unsigned int *group_id)
+{
+	u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	int err;
+
+	MLX5_SET(create_flow_group_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_FLOW_GROUP);
+	MLX5_SET(create_flow_group_in, in, table_type, ft->type);
+	MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+	if (ft->vport) {
+		MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+		MLX5_SET(create_flow_group_in, in, other_vport, 1);
+	}
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*group_id = MLX5_GET(create_flow_group_out, out,
+				     group_id);
+	return err;
+}
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+				struct mlx5_flow_table *ft,
+				unsigned int group_id)
+{
+	u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)]   = {0};
+
+	MLX5_SET(destroy_flow_group_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+	MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
+	MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
+	MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+	if (ft->vport) {
+		MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+		MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+	}
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
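+/* Build and execute a SET_FLOW_TABLE_ENTRY inbox: the flow context
+ * (tag, action, encap id and match value) followed by the forward
+ * destination and flow counter lists gathered from the FTE's children.
+ */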
+static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+			    int opmod, int modify_mask,
+			    struct mlx5_flow_table *ft,
+			    unsigned int group_id,
+			    struct fs_fte *fte)
+{
+	unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+		fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct);
+	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
+	struct mlx5_flow_rule *dst;
+	void *in_flow_context;
+	void *in_match_value;
+	void *in_dests;
+	u32 *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_warn(dev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+
+	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+	MLX5_SET(set_fte_in, in, op_mod, opmod);
+	MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+	MLX5_SET(set_fte_in, in, table_type, ft->type);
+	MLX5_SET(set_fte_in, in, table_id,   ft->id);
+	MLX5_SET(set_fte_in, in, flow_index, fte->index);
+	if (ft->vport) {
+		MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+		MLX5_SET(set_fte_in, in, other_vport, 1);
+	}
+
+	in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+	MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+	MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
+	MLX5_SET(flow_context, in_flow_context, action, fte->action);
+	MLX5_SET(flow_context, in_flow_context, encap_id, fte->encap_id);
+	in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+				      match_value);
+	memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+
+	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		int list_size = 0;
+
+		list_for_each_entry(dst, &fte->node.children, node.list) {
+			unsigned int id;
+
+			if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
+			MLX5_SET(dest_format_struct, in_dests, destination_type,
+				 dst->dest_attr.type);
+			if (dst->dest_attr.type ==
+			    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
+				id = dst->dest_attr.ft->id;
+			} else {
+				id = dst->dest_attr.tir_num;
+			}
+			MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			list_size++;
+		}
+
+		MLX5_SET(flow_context, in_flow_context, destination_list_size,
+			 list_size);
+	}
+
+	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		int list_size = 0;
+
+		list_for_each_entry(dst, &fte->node.children, node.list) {
+			if (dst->dest_attr.type !=
+			    MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+				continue;
+
+			MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+				 dst->dest_attr.counter->id);
+			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+			list_size++;
+		}
+
+		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+			 list_size);
+	}
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	kvfree(in);
+	return err;
+}
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int group_id,
+			struct fs_fte *fte)
+{
+	return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+}
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int group_id,
+			int modify_mask,
+			struct fs_fte *fte)
+{
+	int opmod;
+	int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
+						flow_table_properties_nic_receive.
+						flow_modify_en);
+
+	if (!atomic_mod_cap)
+		return -ENOTSUPP;
+	opmod = 1;
+
+	return	mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
+}
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int index)
+{
+	u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(delete_fte_in)]   = {0};
+
+	MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+	MLX5_SET(delete_fte_in, in, table_type, ft->type);
+	MLX5_SET(delete_fte_in, in, table_id, ft->id);
+	MLX5_SET(delete_fte_in, in, flow_index, index);
+	if (ft->vport) {
+		MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+		MLX5_SET(delete_fte_in, in, other_vport, 1);
+	}
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
+	int err;
+
+	MLX5_SET(alloc_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+	return err;
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)] = {0};
+
+	MLX5_SET(dealloc_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+	MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+		      u64 *packets, u64 *bytes)
+{
+	u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+		MLX5_ST_SZ_BYTES(traffic_counter)]   = {0};
+	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+	void *stats;
+	int err = 0;
+
+	MLX5_SET(query_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+	*packets = MLX5_GET64(traffic_counter, stats, packets);
+	*bytes = MLX5_GET64(traffic_counter, stats, octets);
+	return 0;
+}
+
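+/* Scratch buffer for a bulk flow counter query: counters in the range
+ * [id, id + num) are fetched with one QUERY_FLOW_COUNTER command and
+ * read back one by one via mlx5_cmd_fc_bulk_get().
+ */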
+struct mlx5_cmd_fc_bulk {
+	u16 id;
+	int num;
+	int outlen;
+	u32 out[0];
+};
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
+{
+	struct mlx5_cmd_fc_bulk *b;
+	int outlen =
+		MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+		MLX5_ST_SZ_BYTES(traffic_counter) * num;
+
+	b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
+	if (!b)
+		return NULL;
+
+	b->id = id;
+	b->num = num;
+	b->outlen = outlen;
+
+	return b;
+}
+
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+{
+	kfree(b);
+}
+
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+{
+	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
+
+	MLX5_SET(query_flow_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+	MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+	MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
+	MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
+	return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
+}
+
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+			  struct mlx5_cmd_fc_bulk *b, u16 id,
+			  u64 *packets, u64 *bytes)
+{
+	int index = id - b->id;
+	void *stats;
+
+	if (index < 0 || index >= b->num) {
+		mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
+			       id, b->id, b->id + b->num - 1);
+		return;
+	}
+
+	stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
+			     flow_statistics[index]);
+	*packets = MLX5_GET64(traffic_counter, stats, packets);
+	*bytes = MLX5_GET64(traffic_counter, stats, octets);
+}
+
+int mlx5_encap_alloc(struct mlx5_core_dev *dev,
+		     int header_type,
+		     size_t size,
+		     void *encap_header,
+		     u32 *encap_id)
+{
+	int max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
+	u32 out[MLX5_ST_SZ_DW(alloc_encap_header_out)] = {0};
+	void *encap_header_in;
+	void *header;
+	int inlen;
+	int err;
+	u32 *in;
+
+	if (size > max_encap_size)
+		return -EINVAL;
+
+	in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
+		     GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	encap_header_in = MLX5_ADDR_OF(alloc_encap_header_in, in, encap_header);
+	header = MLX5_ADDR_OF(encap_header_in, encap_header_in, encap_header);
+	inlen = header - (void *)in + size;
+
+	MLX5_SET(alloc_encap_header_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_ENCAP_HEADER);
+	MLX5_SET(encap_header_in, encap_header_in, encap_header_size, size);
+	MLX5_SET(encap_header_in, encap_header_in, header_type, header_type);
+	memcpy(header, encap_header, size);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+	if (!err)
+		*encap_id = MLX5_GET(alloc_encap_header_out, out, encap_id);
+	kfree(in);
+	return err;
+}
+
+void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_encap_header_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_encap_header_out)] = {0};
+
+	MLX5_SET(dealloc_encap_header_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_ENCAP_HEADER);
+	MLX5_SET(dealloc_encap_header_in, in, encap_id, encap_id);
+
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/fs/cmd.h b/drivers/net/ethernet/mellanox/mlx5/fs/cmd.h
new file mode 100644
index 0000000..8fad806
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/fs/cmd.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CMD_
+#define _MLX5_FS_CMD_
+
+int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+			       u16 vport,
+			       enum fs_flow_table_op_mod op_mod,
+			       enum fs_flow_table_type type, unsigned int level,
+			       unsigned int log_size, struct mlx5_flow_table
+			       *next_ft, unsigned int *table_id, u32 flags);
+
+int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
+				struct mlx5_flow_table *ft);
+
+int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
+			       struct mlx5_flow_table *ft,
+			       struct mlx5_flow_table *next_ft);
+
+int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
+			       struct mlx5_flow_table *ft,
+			       u32 *in, unsigned int *group_id);
+
+int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
+				struct mlx5_flow_table *ft,
+				unsigned int group_id);
+
+int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int group_id,
+			struct fs_fte *fte);
+
+int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int group_id,
+			int modify_mask,
+			struct fs_fte *fte);
+
+int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
+			struct mlx5_flow_table *ft,
+			unsigned int index);
+
+int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
+			    struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+		      u64 *packets, u64 *bytes);
+
+struct mlx5_cmd_fc_bulk;
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num);
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+			  struct mlx5_cmd_fc_bulk *b, u16 id,
+			  u64 *packets, u64 *bytes);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/fs/core.c b/drivers/net/ethernet/mellanox/mlx5/fs/core.c
new file mode 100644
index 0000000..6a8283f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/fs/core.c
@@ -0,0 +1,2038 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mutex.h>
+#include "lib/driver.h"
+#include "mlx5.h"
+#include "fs/core.h"
+#include "fs/cmd.h"
+
+#define INIT_TREE_NODE_ARRAY_SIZE(...)	(sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
+					 sizeof(struct init_tree_node))
+
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
+		 ...) {.type = FS_TYPE_PRIO,\
+	.min_ft_level = min_level_val,\
+	.num_levels = num_levels_val,\
+	.num_leaf_prios = num_prios_val,\
+	.caps = caps_val,\
+	.children = (struct init_tree_node[]) {__VA_ARGS__},\
+	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+	ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
+		 __VA_ARGS__)\
+
+#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
+	.children = (struct init_tree_node[]) {__VA_ARGS__},\
+	.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
+				   sizeof(long))
+
+#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
+
+#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
+			       .caps = (long[]) {__VA_ARGS__} }
+
+#define FS_CHAINING_CAPS  FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
+					   FS_CAP(flow_table_properties_nic_receive.modify_root), \
+					   FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
+					   FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+
+#define LEFTOVERS_NUM_LEVELS 1
+#define LEFTOVERS_NUM_PRIOS 1
+
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+			   LEFTOVERS_NUM_PRIOS)
+
+#define ETHTOOL_PRIO_NUM_LEVELS 1
+#define ETHTOOL_NUM_PRIOS 11
+#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
+/* Vlan, mac, ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define ANCHOR_NUM_LEVELS 1
+#define ANCHOR_NUM_PRIOS 1
+#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
+
+#define OFFLOADS_MAX_FT 1
+#define OFFLOADS_NUM_PRIOS 1
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+
+struct node_caps {
+	size_t	arr_sz;
+	long	*caps;
+};
+static struct init_tree_node {
+	enum fs_node_type	type;
+	struct init_tree_node *children;
+	int ar_size;
+	struct node_caps caps;
+	int min_ft_level;
+	int num_leaf_prios;
+	int prio;
+	int num_levels;
+} root_fs = {
+	.type = FS_TYPE_NAMESPACE,
+	.ar_size = 7,
+	.children = (struct init_tree_node[]) {
+		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+						  BY_PASS_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, LAG_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+						  LAG_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+			 ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
+		ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+						  ETHTOOL_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+			 ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+				ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+						  KERNEL_NIC_PRIO_NUM_LEVELS))),
+		ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
+			 FS_CHAINING_CAPS,
+			 ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
+		ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+			 ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
+	}
+};
+
+enum fs_i_mutex_lock_class {
+	FS_MUTEX_GRANDPARENT,
+	FS_MUTEX_PARENT,
+	FS_MUTEX_CHILD
+};
+
+static void del_rule(struct fs_node *node);
+static void del_flow_table(struct fs_node *node);
+static void del_flow_group(struct fs_node *node);
+static void del_fte(struct fs_node *node);
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+				struct mlx5_flow_destination *d2);
+static struct mlx5_flow_rule *
+find_flow_rule(struct fs_fte *fte,
+	       struct mlx5_flow_destination *dest);
+
+static void tree_init_node(struct fs_node *node,
+			   unsigned int refcount,
+			   void (*remove_func)(struct fs_node *))
+{
+	atomic_set(&node->refcount, refcount);
+	INIT_LIST_HEAD(&node->list);
+	INIT_LIST_HEAD(&node->children);
+	mutex_init(&node->lock);
+	node->remove_func = remove_func;
+}
+
+static void tree_add_node(struct fs_node *node, struct fs_node *parent)
+{
+	if (parent)
+		atomic_inc(&parent->refcount);
+	node->parent = parent;
+
+	/* Parent is the root */
+	if (!parent)
+		node->root = node;
+	else
+		node->root = parent->root;
+}
+
+static void tree_get_node(struct fs_node *node)
+{
+	atomic_inc(&node->refcount);
+}
+
+static void nested_lock_ref_node(struct fs_node *node,
+				 enum fs_i_mutex_lock_class class)
+{
+	if (node) {
+		mutex_lock_nested(&node->lock, class);
+		atomic_inc(&node->refcount);
+	}
+}
+
+static void lock_ref_node(struct fs_node *node)
+{
+	if (node) {
+		mutex_lock(&node->lock);
+		atomic_inc(&node->refcount);
+	}
+}
+
+static void unlock_ref_node(struct fs_node *node)
+{
+	if (node) {
+		atomic_dec(&node->refcount);
+		mutex_unlock(&node->lock);
+	}
+}
+
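+/* Drop a reference to the node. On the last put, unlink it from its
+ * parent, run its remove callback and free it, then release the
+ * reference it held on the parent (which may cascade up the tree).
+ */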
+static void tree_put_node(struct fs_node *node)
+{
+	struct fs_node *parent_node = node->parent;
+
+	lock_ref_node(parent_node);
+	if (atomic_dec_and_test(&node->refcount)) {
+		if (parent_node)
+			list_del_init(&node->list);
+		if (node->remove_func)
+			node->remove_func(node);
+		kfree(node);
+		node = NULL;
+	}
+	unlock_ref_node(parent_node);
+	if (!node && parent_node)
+		tree_put_node(parent_node);
+}
+
+static int tree_remove_node(struct fs_node *node)
+{
+	if (atomic_read(&node->refcount) > 1) {
+		atomic_dec(&node->refcount);
+		return -EEXIST;
+	}
+	tree_put_node(node);
+	return 0;
+}
+
+static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
+				 unsigned int prio)
+{
+	struct fs_prio *iter_prio;
+
+	fs_for_each_prio(iter_prio, ns) {
+		if (iter_prio->prio == prio)
+			return iter_prio;
+	}
+
+	return NULL;
+}
+
+static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i++, mask++, val1++, val2++)
+		if ((*((u8 *)val1) & (*(u8 *)mask)) !=
+		    ((*(u8 *)val2) & (*(u8 *)mask)))
+			return false;
+
+	return true;
+}
+
+static bool compare_match_value(struct mlx5_flow_group_mask *mask,
+				void *fte_param1, void *fte_param2)
+{
+	if (mask->match_criteria_enable &
+	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
+		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+						fte_param1, outer_headers);
+		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+						fte_param2, outer_headers);
+		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+					      mask->match_criteria, outer_headers);
+
+		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+			return false;
+	}
+
+	if (mask->match_criteria_enable &
+	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
+		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+						fte_param1, misc_parameters);
+		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+						fte_param2, misc_parameters);
+		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+					  mask->match_criteria, misc_parameters);
+
+		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+				   MLX5_ST_SZ_BYTES(fte_match_set_misc)))
+			return false;
+	}
+
+	if (mask->match_criteria_enable &
+	    1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
+		void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
+						fte_param1, inner_headers);
+		void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
+						fte_param2, inner_headers);
+		void *fte_mask = MLX5_ADDR_OF(fte_match_param,
+					  mask->match_criteria, inner_headers);
+
+		if (!masked_memcmp(fte_mask, fte_match1, fte_match2,
+				   MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
+			return false;
+	}
+	return true;
+}
+
+static bool compare_match_criteria(u8 match_criteria_enable1,
+				   u8 match_criteria_enable2,
+				   void *mask1, void *mask2)
+{
+	return match_criteria_enable1 == match_criteria_enable2 &&
+		!memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
+}
+
+static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+{
+	struct fs_node *root;
+	struct mlx5_flow_namespace *ns;
+
+	root = node->root;
+
+	if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
+		pr_warn("mlx5: flow steering node is not in tree or is corrupted\n");
+		return NULL;
+	}
+
+	ns = container_of(root, struct mlx5_flow_namespace, node);
+	return container_of(ns, struct mlx5_flow_root_namespace, ns);
+}
+
+static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
+{
+	struct mlx5_flow_root_namespace *root = find_root(node);
+
+	if (root)
+		return root->dev;
+	return NULL;
+}
+
+static void del_flow_table(struct fs_node *node)
+{
+	struct mlx5_flow_table *ft;
+	struct mlx5_core_dev *dev;
+	struct fs_prio *prio;
+	int err;
+
+	fs_get_obj(ft, node);
+	dev = get_dev(&ft->node);
+
+	err = mlx5_cmd_destroy_flow_table(dev, ft);
+	if (err)
+		mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+	fs_get_obj(prio, ft->node.parent);
+	prio->num_ft--;
+}
+
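+/* Remove a rule from its FTE: drop the rule's destination (or counter)
+ * from the FTE and, while other destinations remain, update the FTE in
+ * the device accordingly.
+ */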
+static void del_rule(struct fs_node *node)
+{
+	struct mlx5_flow_rule *rule;
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct fs_fte *fte;
+	u32	*match_value;
+	int modify_mask;
+	struct mlx5_core_dev *dev = get_dev(node);
+	int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
+	int err;
+	bool update_fte = false;
+
+	match_value = mlx5_vzalloc(match_len);
+	if (!match_value) {
+		mlx5_core_warn(dev, "failed to allocate inbox\n");
+		return;
+	}
+
+	fs_get_obj(rule, node);
+	fs_get_obj(fte, rule->node.parent);
+	fs_get_obj(fg, fte->node.parent);
+	memcpy(match_value, fte->val, sizeof(fte->val));
+	fs_get_obj(ft, fg->node.parent);
+	list_del(&rule->node.list);
+	if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+		mutex_lock(&rule->dest_attr.ft->lock);
+		list_del(&rule->next_ft);
+		mutex_unlock(&rule->dest_attr.ft->lock);
+	}
+
+	if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
+	    --fte->dests_size) {
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+		fte->action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+		update_fte = true;
+		goto out;
+	}
+
+	if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
+	    --fte->dests_size) {
+		modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+		update_fte = true;
+	}
+out:
+	if (update_fte && fte->dests_size) {
+		err = mlx5_cmd_update_fte(dev, ft, fg->id, modify_mask, fte);
+		if (err)
+			mlx5_core_warn(dev,
+				       "%s can't del rule fg id=%d fte_index=%d\n",
+				       __func__, fg->id, fte->index);
+	}
+	kvfree(match_value);
+}
+
+static void del_fte(struct fs_node *node)
+{
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct mlx5_core_dev *dev;
+	struct fs_fte *fte;
+	int err;
+
+	fs_get_obj(fte, node);
+	fs_get_obj(fg, fte->node.parent);
+	fs_get_obj(ft, fg->node.parent);
+
+	dev = get_dev(&ft->node);
+	err = mlx5_cmd_delete_fte(dev, ft, fte->index);
+	if (err)
+		mlx5_core_warn(dev,
+			       "flow steering can't delete fte in index %d of flow group id %d\n",
+			       fte->index, fg->id);
+
+	fte->status = 0;
+	fg->num_ftes--;
+}
+
+static void del_flow_group(struct fs_node *node)
+{
+	struct mlx5_flow_group *fg;
+	struct mlx5_flow_table *ft;
+	struct mlx5_core_dev *dev;
+
+	fs_get_obj(fg, node);
+	fs_get_obj(ft, fg->node.parent);
+	dev = get_dev(&ft->node);
+
+	if (ft->autogroup.active)
+		ft->autogroup.num_groups--;
+
+	if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+		mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+			       fg->id, ft->id);
+}
+
+static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
+				u32 *match_value,
+				unsigned int index)
+{
+	struct fs_fte *fte;
+
+	fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+	if (!fte)
+		return ERR_PTR(-ENOMEM);
+
+	memcpy(fte->val, match_value, sizeof(fte->val));
+	fte->node.type = FS_TYPE_FLOW_ENTRY;
+	fte->flow_tag = flow_act->flow_tag;
+	fte->index = index;
+	fte->action = flow_act->action;
+	fte->encap_id = flow_act->encap_id;
+
+	return fte;
+}
+
+static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+{
+	struct mlx5_flow_group *fg;
+	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+					    create_fg_in, match_criteria);
+	u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+					    create_fg_in,
+					    match_criteria_enable);
+
+	fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+	if (!fg)
+		return ERR_PTR(-ENOMEM);
+
+	fg->mask.match_criteria_enable = match_criteria_enable;
+	memcpy(&fg->mask.match_criteria, match_criteria,
+	       sizeof(fg->mask.match_criteria));
+	fg->node.type = FS_TYPE_FLOW_GROUP;
+	fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
+				   start_flow_index);
+	fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
+				end_flow_index) - fg->start_index + 1;
+	return fg;
+}
+
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
+						enum fs_flow_table_type table_type,
+						enum fs_flow_table_op_mod op_mod,
+						u32 flags)
+{
+	struct mlx5_flow_table *ft;
+
+	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+	if (!ft)
+		return NULL;
+
+	ft->level = level;
+	ft->node.type = FS_TYPE_FLOW_TABLE;
+	ft->op_mod = op_mod;
+	ft->type = table_type;
+	ft->vport = vport;
+	ft->max_fte = max_fte;
+	ft->flags = flags;
+	INIT_LIST_HEAD(&ft->fwd_rules);
+	mutex_init(&ft->lock);
+
+	return ft;
+}
+
+/* If reverse is false, search for the first flow table in the root
+ * sub-tree after start (closest from the right); otherwise, search for
+ * the last flow table in the root sub-tree up to start (closest from
+ * the left).
+ */
+static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node  *root,
+							 struct list_head *start,
+							 bool reverse)
+{
+#define list_advance_entry(pos, reverse)		\
+	((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
+
+#define list_for_each_advance_continue(pos, head, reverse)	\
+	for (pos = list_advance_entry(pos, reverse);		\
+	     &pos->list != (head);				\
+	     pos = list_advance_entry(pos, reverse))
+
+	struct fs_node *iter = list_entry(start, struct fs_node, list);
+	struct mlx5_flow_table *ft = NULL;
+
+	if (!root)
+		return NULL;
+
+	list_for_each_advance_continue(iter, &root->children, reverse) {
+		if (iter->type == FS_TYPE_FLOW_TABLE) {
+			fs_get_obj(ft, iter);
+			return ft;
+		}
+		ft = find_closest_ft_recursive(iter, &iter->children, reverse);
+		if (ft)
+			return ft;
+	}
+
+	return ft;
+}
+
+/* If reverse is false, return the first flow table in the next priority
+ * after prio in the tree; otherwise, return the last flow table in the
+ * previous priority before prio.
+ */
+static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
+{
+	struct mlx5_flow_table *ft = NULL;
+	struct fs_node *curr_node;
+	struct fs_node *parent;
+
+	parent = prio->node.parent;
+	curr_node = &prio->node;
+	while (!ft && parent) {
+		ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
+		curr_node = parent;
+		parent = curr_node->parent;
+	}
+	return ft;
+}
+
+/* Assuming all the tree is locked by mutex chain lock */
+static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
+{
+	return find_closest_ft(prio, false);
+}
+
+/* Assuming all the tree is locked by mutex chain lock */
+static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
+{
+	return find_closest_ft(prio, true);
+}
+
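+/* Re-point the miss destination of every flow table in prio to ft */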
+static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+			       struct fs_prio *prio,
+			       struct mlx5_flow_table *ft)
+{
+	struct mlx5_flow_table *iter;
+	int i = 0;
+	int err;
+
+	fs_for_each_ft(iter, prio) {
+		i++;
+		err = mlx5_cmd_modify_flow_table(dev,
+						 iter,
+						 ft);
+		if (err) {
+			mlx5_core_warn(dev, "Failed to modify flow table %d\n",
+				       iter->id);
+			/* The driver is out of sync with the FW */
+			WARN_ON(i > 1);
+			return err;
+		}
+	}
+	return 0;
+}
+
+/* Connect flow tables from previous priority of prio to ft */
+static int connect_prev_fts(struct mlx5_core_dev *dev,
+			    struct mlx5_flow_table *ft,
+			    struct fs_prio *prio)
+{
+	struct mlx5_flow_table *prev_ft;
+
+	prev_ft = find_prev_chained_ft(prio);
+	if (prev_ft) {
+		struct fs_prio *prev_prio;
+
+		fs_get_obj(prev_prio, prev_ft->node.parent);
+		return connect_fts_in_prio(dev, prev_prio, ft);
+	}
+	return 0;
+}
+
+static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
+				 *prio)
+{
+	struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+	int min_level = INT_MAX;
+	int err;
+
+	if (root->root_ft)
+		min_level = root->root_ft->level;
+
+	if (ft->level >= min_level)
+		return 0;
+
+	err = mlx5_cmd_update_root_ft(root->dev, ft);
+	if (err)
+		mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
+			       ft->id);
+	else
+		root->root_ft = ft;
+
+	return err;
+}
+
+static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+					 struct mlx5_flow_destination *dest)
+{
+	struct mlx5_flow_table *ft;
+	struct mlx5_flow_group *fg;
+	struct fs_fte *fte;
+	int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+	int err = 0;
+
+	fs_get_obj(fte, rule->node.parent);
+	if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return -EINVAL;
+	lock_ref_node(&fte->node);
+	fs_get_obj(fg, fte->node.parent);
+	fs_get_obj(ft, fg->node.parent);
+
+	memcpy(&rule->dest_attr, dest, sizeof(*dest));
+	err = mlx5_cmd_update_fte(get_dev(&ft->node),
+				  ft, fg->id,
+				  modify_mask,
+				  fte);
+	unlock_ref_node(&fte->node);
+
+	return err;
+}
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest)
+{
+	int i;
+
+	if (!old_dest) {
+		if (handle->num_rules != 1)
+			return -EINVAL;
+		return _mlx5_modify_rule_destination(handle->rule[0],
+						     new_dest);
+	}
+
+	for (i = 0; i < handle->num_rules; i++) {
+		if (mlx5_flow_dests_cmp(new_dest, &handle->rule[i]->dest_attr))
+			return _mlx5_modify_rule_destination(handle->rule[i],
+							     new_dest);
+	}
+
+	return -EINVAL;
+}
+
+/* Modify/set FWD rules that point to old_next_ft so they point to new_next_ft */
+static int connect_fwd_rules(struct mlx5_core_dev *dev,
+			     struct mlx5_flow_table *new_next_ft,
+			     struct mlx5_flow_table *old_next_ft)
+{
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_rule *iter;
+	int err = 0;
+
+	/* new_next_ft and old_next_ft could be NULL only
+	 * when we create/destroy the anchor flow table.
+	 */
+	if (!new_next_ft || !old_next_ft)
+		return 0;
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+	dest.ft = new_next_ft;
+
+	mutex_lock(&old_next_ft->lock);
+	list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
+	mutex_unlock(&old_next_ft->lock);
+	list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+		err = _mlx5_modify_rule_destination(iter, &dest);
+		if (err)
+			pr_err("mlx5_core: failed to modify rule to point to flow table %d\n",
+			       new_next_ft->id);
+	}
+	return 0;
+}
+
+static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
+			      struct fs_prio *prio)
+{
+	struct mlx5_flow_table *next_ft;
+	int err = 0;
+
+	/* Connect_prev_fts and update_root_ft_create are mutually exclusive */
+
+	if (list_empty(&prio->node.children)) {
+		err = connect_prev_fts(dev, ft, prio);
+		if (err)
+			return err;
+
+		next_ft = find_next_chained_ft(prio);
+		err = connect_fwd_rules(dev, ft, next_ft);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_FLOWTABLE(dev,
+			       flow_table_properties_nic_receive.modify_root))
+		err = update_root_ft_create(ft, prio);
+	return err;
+}
+
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+				struct fs_prio *prio)
+{
+	struct list_head *prev = &prio->node.children;
+	struct mlx5_flow_table *iter;
+
+	fs_for_each_ft(iter, prio) {
+		if (iter->level > ft->level)
+			break;
+		prev = &iter->node.list;
+	}
+	list_add(&ft->node.list, prev);
+}
+
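+/* Allocate a flow table, create it in the device at the right level
+ * within prio, and connect it into the chain of tables (miss path,
+ * forward rules and possibly the root table).
+ */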
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+							enum fs_flow_table_op_mod op_mod,
+							u16 vport, int prio,
+							int max_fte, u32 level,
+							u32 flags)
+{
+	struct mlx5_flow_table *next_ft = NULL;
+	struct mlx5_flow_table *ft;
+	int err;
+	int log_table_sz;
+	struct mlx5_flow_root_namespace *root =
+		find_root(&ns->node);
+	struct fs_prio *fs_prio = NULL;
+
+	if (!root) {
+		pr_err("mlx5: flow steering failed to find root of namespace\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_lock(&root->chain_lock);
+	fs_prio = find_prio(ns, prio);
+	if (!fs_prio) {
+		err = -EINVAL;
+		goto unlock_root;
+	}
+	if (level >= fs_prio->num_levels) {
+		err = -ENOSPC;
+		goto unlock_root;
+	}
+	/* The requested level is an offset into the
+	 * priority's level range.
+	 */
+	level += fs_prio->start_level;
+	ft = alloc_flow_table(level,
+			      vport,
+			      max_fte ? roundup_pow_of_two(max_fte) : 0,
+			      root->table_type,
+			      op_mod, flags);
+	if (!ft) {
+		err = -ENOMEM;
+		goto unlock_root;
+	}
+
+	tree_init_node(&ft->node, 1, del_flow_table);
+	log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
+	next_ft = find_next_chained_ft(fs_prio);
+	err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
+					 ft->level, log_table_sz, next_ft, &ft->id,
+					 ft->flags);
+	if (err)
+		goto free_ft;
+
+	err = connect_flow_table(root->dev, ft, fs_prio);
+	if (err)
+		goto destroy_ft;
+	lock_ref_node(&fs_prio->node);
+	tree_add_node(&ft->node, &fs_prio->node);
+	list_add_flow_table(ft, fs_prio);
+	fs_prio->num_ft++;
+	unlock_ref_node(&fs_prio->node);
+	mutex_unlock(&root->chain_lock);
+	return ft;
+destroy_ft:
+	mlx5_cmd_destroy_flow_table(root->dev, ft);
+free_ft:
+	kfree(ft);
+unlock_root:
+	mutex_unlock(&root->chain_lock);
+	return ERR_PTR(err);
+}
+
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+					       int prio, int max_fte,
+					       u32 level,
+					       u32 flags)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, 0, prio,
+					max_fte, level, flags);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+						     int prio, int max_fte,
+						     u32 level, u16 vport)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_NORMAL, vport, prio,
+					max_fte, level, 0);
+}
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level)
+{
+	return __mlx5_create_flow_table(ns, FS_FT_OP_MOD_LAG_DEMUX, 0, prio, 0,
+					level, 0);
+}
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
+
+struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+							    int prio,
+							    int num_flow_table_entries,
+							    int max_num_groups,
+							    u32 level,
+							    u32 flags)
+{
+	struct mlx5_flow_table *ft;
+
+	if (max_num_groups > num_flow_table_entries)
+		return ERR_PTR(-EINVAL);
+
+	ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level, flags);
+	if (IS_ERR(ft))
+		return ft;
+
+	ft->autogroup.active = true;
+	ft->autogroup.required_groups = max_num_groups;
+
+	return ft;
+}
+EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
+
+/* Flow table should be locked */
+static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
+							u32 *fg_in,
+							struct list_head
+							*prev_fg,
+							bool is_auto_fg)
+{
+	struct mlx5_flow_group *fg;
+	struct mlx5_core_dev *dev = get_dev(&ft->node);
+	int err;
+
+	if (!dev)
+		return ERR_PTR(-ENODEV);
+
+	fg = alloc_flow_group(fg_in);
+	if (IS_ERR(fg))
+		return fg;
+
+	err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+	if (err) {
+		kfree(fg);
+		return ERR_PTR(err);
+	}
+
+	if (ft->autogroup.active)
+		ft->autogroup.num_groups++;
+	/* Add node to tree */
+	tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
+	tree_add_node(&fg->node, &ft->node);
+	/* Add node to group list */
+	list_add(&fg->node.list, prev_fg);
+
+	return fg;
+}
+
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+					       u32 *fg_in)
+{
+	struct mlx5_flow_group *fg;
+
+	if (ft->autogroup.active)
+		return ERR_PTR(-EPERM);
+
+	lock_ref_node(&ft->node);
+	fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
+	unlock_ref_node(&ft->node);
+
+	return fg;
+}
+
+static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
+{
+	struct mlx5_flow_rule *rule;
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return NULL;
+
+	INIT_LIST_HEAD(&rule->next_ft);
+	rule->node.type = FS_TYPE_FLOW_DEST;
+	if (dest)
+		memcpy(&rule->dest_attr, dest, sizeof(*dest));
+
+	return rule;
+}
+
+static struct mlx5_flow_handle *alloc_handle(int num_rules)
+{
+	struct mlx5_flow_handle *handle;
+
+	handle = kzalloc(sizeof(*handle) + sizeof(handle->rule[0]) *
+			  num_rules, GFP_KERNEL);
+	if (!handle)
+		return NULL;
+
+	handle->num_rules = num_rules;
+
+	return handle;
+}
+
+static void destroy_flow_handle(struct fs_fte *fte,
+				struct mlx5_flow_handle *handle,
+				struct mlx5_flow_destination *dest,
+				int i)
+{
+	for (; --i >= 0;) {
+		if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+			fte->dests_size--;
+			list_del(&handle->rule[i]->node.list);
+			kfree(handle->rule[i]);
+		}
+	}
+	kfree(handle);
+}
+
+static struct mlx5_flow_handle *
+create_flow_handle(struct fs_fte *fte,
+		   struct mlx5_flow_destination *dest,
+		   int dest_num,
+		   int *modify_mask,
+		   bool *new_rule)
+{
+	struct mlx5_flow_handle *handle;
+	struct mlx5_flow_rule *rule = NULL;
+	static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+	static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+	int type;
+	int i = 0;
+
+	handle = alloc_handle((dest_num) ? dest_num : 1);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+
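+	/* Build one rule per destination; with no destinations (dest_num == 0)
+	 * a single rule with an empty destination is still created.
+	 */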
+	do {
+		if (dest) {
+			rule = find_flow_rule(fte, dest + i);
+			if (rule) {
+				atomic_inc(&rule->node.refcount);
+				goto rule_found;
+			}
+		}
+
+		*new_rule = true;
+		rule = alloc_rule(dest + i);
+		if (!rule)
+			goto free_rules;
+
+		/* Add dest to dests list- we need flow tables to be in the
+		 * end of the list for forward to next prio rules.
+		 */
+		tree_init_node(&rule->node, 1, del_rule);
+		if (dest &&
+		    dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+			list_add(&rule->node.list, &fte->node.children);
+		else
+			list_add_tail(&rule->node.list, &fte->node.children);
+		if (dest) {
+			fte->dests_size++;
+
+			type = dest[i].type ==
+				MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+			*modify_mask |= type ? count : dst;
+		}
+rule_found:
+		handle->rule[i] = rule;
+	} while (++i < dest_num);
+
+	return handle;
+
+free_rules:
+	destroy_flow_handle(fte, handle, dest, i);
+	return ERR_PTR(-ENOMEM);
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_handle *
+add_rule_fte(struct fs_fte *fte,
+	     struct mlx5_flow_group *fg,
+	     struct mlx5_flow_destination *dest,
+	     int dest_num,
+	     bool update_action)
+{
+	struct mlx5_flow_handle *handle;
+	struct mlx5_flow_table *ft;
+	int modify_mask = 0;
+	int err;
+	bool new_rule = false;
+
+	handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
+				    &new_rule);
+	if (IS_ERR(handle) || !new_rule)
+		goto out;
+
+	if (update_action)
+		modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+	fs_get_obj(ft, fg->node.parent);
+	if (!(fte->status & FS_FTE_STATUS_EXISTING))
+		err = mlx5_cmd_create_fte(get_dev(&ft->node),
+					  ft, fg->id, fte);
+	else
+		err = mlx5_cmd_update_fte(get_dev(&ft->node),
+					  ft, fg->id, modify_mask, fte);
+	if (err)
+		goto free_handle;
+
+	fte->status |= FS_FTE_STATUS_EXISTING;
+
+out:
+	return handle;
+
+free_handle:
+	destroy_flow_handle(fte, handle, dest, handle->num_rules);
+	return ERR_PTR(err);
+}
+
+/* Assumed fg is locked */
+static unsigned int get_free_fte_index(struct mlx5_flow_group *fg,
+				       struct list_head **prev)
+{
+	struct fs_fte *fte;
+	unsigned int start = fg->start_index;
+
+	if (prev)
+		*prev = &fg->node.children;
+
+	/* assumed list is sorted by index */
+	fs_for_each_fte(fte, fg) {
+		if (fte->index != start)
+			return start;
+		start++;
+		if (prev)
+			*prev = &fte->node.list;
+	}
+
+	return start;
+}
+
+/* prev is output, prev->next = new_fte */
+static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
+				 u32 *match_value,
+				 struct mlx5_flow_act *flow_act,
+				 struct list_head **prev)
+{
+	int index;
+
+	index = get_free_fte_index(fg, prev);
+
+	return alloc_fte(flow_act, match_value, index);
+}
+
+static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
+						u8 match_criteria_enable,
+						u32 *match_criteria)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct list_head *prev = ft->node.children.prev;
+	unsigned int candidate_index = 0;
+	struct mlx5_flow_group *fg;
+	void *match_criteria_addr;
+	unsigned int group_size = 0;
+	u32 *in;
+
+	if (!ft->autogroup.active)
+		return ERR_PTR(-ENOENT);
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return ERR_PTR(-ENOMEM);
+
+	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
+		/* Reserve space for required_groups + 1 groups so that a
+		 * group can still be added once all required groups exist.
+		 */
+		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+
+	/* group_size can be 0 if max_fte < required_groups + 1 */
+	if (group_size == 0)
+		group_size = 1;
+
+	/* sorted by start_index */
+	fs_for_each_fg(fg, ft) {
+		if (candidate_index + group_size > fg->start_index)
+			candidate_index = fg->start_index + fg->max_ftes;
+		else
+			break;
+		prev = &fg->node.list;
+	}
+
+	if (candidate_index + group_size > ft->max_fte) {
+		fg = ERR_PTR(-ENOSPC);
+		goto out;
+	}
+
+	MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+		 match_criteria_enable);
+	MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
+	MLX5_SET(create_flow_group_in, in, end_flow_index,   candidate_index +
+		 group_size - 1);
+	match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
+					   in, match_criteria);
+	memcpy(match_criteria_addr, match_criteria,
+	       MLX5_ST_SZ_BYTES(fte_match_param));
+
+	fg = create_flow_group_common(ft, in, prev, true);
+out:
+	kvfree(in);
+	return fg;
+}
+
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+				struct mlx5_flow_destination *d2)
+{
+	if (d1->type == d2->type) {
+		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
+		     d1->vport_num == d2->vport_num) ||
+		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+		     d1->ft == d2->ft) ||
+		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+		     d1->tir_num == d2->tir_num))
+			return true;
+	}
+
+	return false;
+}
+
+static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
+					     struct mlx5_flow_destination *dest)
+{
+	struct mlx5_flow_rule *rule;
+
+	list_for_each_entry(rule, &fte->node.children, node.list) {
+		if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
+			return rule;
+	}
+	return NULL;
+}
+
+static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+					    u32 *match_value,
+					    struct mlx5_flow_act *flow_act,
+					    struct mlx5_flow_destination *dest,
+					    int dest_num)
+{
+	struct mlx5_flow_handle *handle;
+	struct mlx5_flow_table *ft;
+	struct list_head *prev;
+	struct fs_fte *fte;
+	int i;
+
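+	/* Try to reuse an existing FTE with the same match value and flow tag
+	 * before allocating a new one.
+	 */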
+	nested_lock_ref_node(&fg->node, FS_MUTEX_PARENT);
+	fs_for_each_fte(fte, fg) {
+		nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+		if (compare_match_value(&fg->mask, match_value, &fte->val) &&
+		    (flow_act->action & fte->action) &&
+		    flow_act->flow_tag == fte->flow_tag) {
+			int old_action = fte->action;
+
+			fte->action |= flow_act->action;
+			handle = add_rule_fte(fte, fg, dest, dest_num,
+					      old_action != flow_act->action);
+			if (IS_ERR(handle)) {
+				fte->action = old_action;
+				goto unlock_fte;
+			} else {
+				goto add_rules;
+			}
+		}
+		unlock_ref_node(&fte->node);
+	}
+	fs_get_obj(ft, fg->node.parent);
+	if (fg->num_ftes >= fg->max_ftes) {
+		handle = ERR_PTR(-ENOSPC);
+		goto unlock_fg;
+	}
+
+	fte = create_fte(fg, match_value, flow_act, &prev);
+	if (IS_ERR(fte)) {
+		handle = (void *)fte;
+		goto unlock_fg;
+	}
+	tree_init_node(&fte->node, 0, del_fte);
+	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
+	handle = add_rule_fte(fte, fg, dest, dest_num, false);
+	if (IS_ERR(handle)) {
+		kfree(fte);
+		goto unlock_fg;
+	}
+
+	fg->num_ftes++;
+
+	tree_add_node(&fte->node, &fg->node);
+	list_add(&fte->node.list, prev);
+add_rules:
+	for (i = 0; i < handle->num_rules; i++) {
+		if (atomic_read(&handle->rule[i]->node.refcount) == 1)
+			tree_add_node(&handle->rule[i]->node, &fte->node);
+	}
+unlock_fte:
+	unlock_ref_node(&fte->node);
+unlock_fg:
+	unlock_ref_node(&fg->node);
+	return handle;
+}
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handle)
+{
+	struct mlx5_flow_rule *dst;
+	struct fs_fte *fte;
+
+	fs_get_obj(fte, handle->rule[0]->node.parent);
+
+	fs_for_each_dst(dst, fte) {
+		if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+			return dst->dest_attr.counter;
+	}
+
+	return NULL;
+}
+
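+/* A counter destination is valid only when the COUNT action is set and the
+ * packet is also dropped or forwarded.
+ */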
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+	if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+		return !counter;
+
+	if (!counter)
+		return false;
+
+	return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+			  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+			  u32 action,
+			  struct mlx5_flow_table *ft)
+{
+	if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+		return counter_is_valid(dest->counter, action);
+
+	if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+		return true;
+
+	if (!dest || ((dest->type ==
+	    MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+	    (dest->ft->level <= ft->level)))
+		return false;
+	return true;
+}
+
+static struct mlx5_flow_handle *
+_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		     struct mlx5_flow_spec *spec,
+		     struct mlx5_flow_act *flow_act,
+		     struct mlx5_flow_destination *dest,
+		     int dest_num)
+
+{
+	struct mlx5_flow_group *g;
+	struct mlx5_flow_handle *rule;
+	int i;
+
+	for (i = 0; i < dest_num; i++) {
+		if (!dest_is_valid(&dest[i], flow_act->action, ft))
+			return ERR_PTR(-EINVAL);
+	}
+
+	nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
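+	/* Look for an existing group with identical match criteria; a full
+	 * group returns -ENOSPC, in which case the search continues.
+	 */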
+	fs_for_each_fg(g, ft)
+		if (compare_match_criteria(g->mask.match_criteria_enable,
+					   spec->match_criteria_enable,
+					   g->mask.match_criteria,
+					   spec->match_criteria)) {
+			rule = add_rule_fg(g, spec->match_value,
+					   flow_act, dest, dest_num);
+			if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
+				goto unlock;
+		}
+
+	g = create_autogroup(ft, spec->match_criteria_enable,
+			     spec->match_criteria);
+	if (IS_ERR(g)) {
+		rule = (void *)g;
+		goto unlock;
+	}
+
+	rule = add_rule_fg(g, spec->match_value, flow_act, dest, dest_num);
+	if (IS_ERR(rule)) {
+		/* Remove assumes refcount > 0 and autogroup creates a group
+		 * with a refcount = 0.
+		 */
+		unlock_ref_node(&ft->node);
+		tree_get_node(&g->node);
+		tree_remove_node(&g->node);
+		return rule;
+	}
+unlock:
+	unlock_ref_node(&ft->node);
+	return rule;
+}
+
+static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
+{
+	return ((ft->type == FS_FT_NIC_RX) &&
+		(MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
+}
+
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    struct mlx5_flow_act *flow_act,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num)
+{
+	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+	struct mlx5_flow_destination gen_dest;
+	struct mlx5_flow_table *next_ft = NULL;
+	struct mlx5_flow_handle *handle = NULL;
+	u32 sw_action = flow_act->action;
+	struct fs_prio *prio;
+
+	fs_get_obj(prio, ft->node.parent);
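+	/* FWD_NEXT_PRIO is emulated by forwarding to the next chained flow
+	 * table, resolved under chain_lock so the chain cannot change.
+	 */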
+	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+		if (!fwd_next_prio_supported(ft))
+			return ERR_PTR(-EOPNOTSUPP);
+		if (dest)
+			return ERR_PTR(-EINVAL);
+		mutex_lock(&root->chain_lock);
+		next_ft = find_next_chained_ft(prio);
+		if (next_ft) {
+			gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+			gen_dest.ft = next_ft;
+			dest = &gen_dest;
+			dest_num = 1;
+			flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+		} else {
+			mutex_unlock(&root->chain_lock);
+			return ERR_PTR(-EOPNOTSUPP);
+		}
+	}
+
+	handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num);
+
+	if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
+		if (!IS_ERR_OR_NULL(handle) &&
+		    (list_empty(&handle->rule[0]->next_ft))) {
+			mutex_lock(&next_ft->lock);
+			list_add(&handle->rule[0]->next_ft,
+				 &next_ft->fwd_rules);
+			mutex_unlock(&next_ft->lock);
+			handle->rule[0]->sw_action = MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
+		}
+		mutex_unlock(&root->chain_lock);
+	}
+	return handle;
+}
+EXPORT_SYMBOL(mlx5_add_flow_rules);
+
+void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
+{
+	int i;
+
+	for (i = handle->num_rules - 1; i >= 0; i--)
+		tree_remove_node(&handle->rule[i]->node);
+	kfree(handle);
+}
+EXPORT_SYMBOL(mlx5_del_flow_rules);
+
+/* Assuming prio->node.children(flow tables) is sorted by level */
+static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
+{
+	struct fs_prio *prio;
+
+	fs_get_obj(prio, ft->node.parent);
+
+	if (!list_is_last(&ft->node.list, &prio->node.children))
+		return list_next_entry(ft, node.list);
+	return find_next_chained_ft(prio);
+}
+
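+/* If the table being destroyed is the current root, promote the next
+ * flow table in the chain (if any) to be the new root.
+ */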
+static int update_root_ft_destroy(struct mlx5_flow_table *ft)
+{
+	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+	struct mlx5_flow_table *new_root_ft = NULL;
+
+	if (root->root_ft != ft)
+		return 0;
+
+	new_root_ft = find_next_ft(ft);
+	if (new_root_ft) {
+		int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft);
+
+		if (err) {
+			mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
+				       ft->id);
+			return err;
+		}
+	}
+	root->root_ft = new_root_ft;
+	return 0;
+}
+
+/* Disconnect a flow table from the chain: re-point the tables and FWD
+ * rules that referenced it to the next flow table.
+ */
+static int disconnect_flow_table(struct mlx5_flow_table *ft)
+{
+	struct mlx5_core_dev *dev = get_dev(&ft->node);
+	struct mlx5_flow_table *next_ft;
+	struct fs_prio *prio;
+	int err = 0;
+
+	err = update_root_ft_destroy(ft);
+	if (err)
+		return err;
+
+	fs_get_obj(prio, ft->node.parent);
+	if  (!(list_first_entry(&prio->node.children,
+				struct mlx5_flow_table,
+				node.list) == ft))
+		return 0;
+
+	next_ft = find_next_chained_ft(prio);
+	err = connect_fwd_rules(dev, next_ft, ft);
+	if (err)
+		return err;
+
+	err = connect_prev_fts(dev, next_ft, prio);
+	if (err)
+		mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
+			       ft->id);
+	return err;
+}
+
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+{
+	struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+	int err = 0;
+
+	mutex_lock(&root->chain_lock);
+	err = disconnect_flow_table(ft);
+	if (err) {
+		mutex_unlock(&root->chain_lock);
+		return err;
+	}
+	if (tree_remove_node(&ft->node))
+		mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
+			       ft->id);
+	mutex_unlock(&root->chain_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+{
+	if (tree_remove_node(&fg->node))
+		mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
+			       fg->id);
+}
+
+struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+						    enum mlx5_flow_namespace_type type)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	struct mlx5_flow_root_namespace *root_ns;
+	int prio;
+	struct fs_prio *fs_prio;
+	struct mlx5_flow_namespace *ns;
+
+	if (!steering)
+		return NULL;
+
+	switch (type) {
+	case MLX5_FLOW_NAMESPACE_BYPASS:
+	case MLX5_FLOW_NAMESPACE_LAG:
+	case MLX5_FLOW_NAMESPACE_OFFLOADS:
+	case MLX5_FLOW_NAMESPACE_ETHTOOL:
+	case MLX5_FLOW_NAMESPACE_KERNEL:
+	case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+	case MLX5_FLOW_NAMESPACE_ANCHOR:
+		prio = type;
+		break;
+	case MLX5_FLOW_NAMESPACE_FDB:
+		if (steering->fdb_root_ns)
+			return &steering->fdb_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (steering->esw_egress_root_ns)
+			return &steering->esw_egress_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (steering->esw_ingress_root_ns)
+			return &steering->esw_ingress_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+		if (steering->sniffer_rx_root_ns)
+			return &steering->sniffer_rx_root_ns->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+		if (steering->sniffer_tx_root_ns)
+			return &steering->sniffer_tx_root_ns->ns;
+		else
+			return NULL;
+	default:
+		return NULL;
+	}
+
+	root_ns = steering->root_ns;
+	if (!root_ns)
+		return NULL;
+
+	fs_prio = find_prio(&root_ns->ns, prio);
+	if (!fs_prio)
+		return NULL;
+
+	ns = list_first_entry(&fs_prio->node.children,
+			      typeof(*ns),
+			      node.list);
+
+	return ns;
+}
+EXPORT_SYMBOL(mlx5_get_flow_namespace);
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+				      unsigned int prio, int num_levels)
+{
+	struct fs_prio *fs_prio;
+
+	fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
+	if (!fs_prio)
+		return ERR_PTR(-ENOMEM);
+
+	fs_prio->node.type = FS_TYPE_PRIO;
+	tree_init_node(&fs_prio->node, 1, NULL);
+	tree_add_node(&fs_prio->node, &ns->node);
+	fs_prio->num_levels = num_levels;
+	fs_prio->prio = prio;
+	list_add_tail(&fs_prio->node.list, &ns->node.children);
+
+	return fs_prio;
+}
+
+static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
+						     *ns)
+{
+	ns->node.type = FS_TYPE_NAMESPACE;
+
+	return ns;
+}
+
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
+{
+	struct mlx5_flow_namespace	*ns;
+
+	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+	if (!ns)
+		return ERR_PTR(-ENOMEM);
+
+	fs_init_namespace(ns);
+	tree_init_node(&ns->node, 1, NULL);
+	tree_add_node(&ns->node, &prio->node);
+	list_add_tail(&ns->node.list, &prio->node.children);
+
+	return ns;
+}
+
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+			     struct init_tree_node *prio_metadata)
+{
+	struct fs_prio *fs_prio;
+	int i;
+
+	for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
+		fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
+		if (IS_ERR(fs_prio))
+			return PTR_ERR(fs_prio);
+	}
+	return 0;
+}
+
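+/* Extract a single capability bit, given its bit offset within the
+ * big-endian flow table capability array.
+ */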
+#define FLOW_TABLE_BIT_SZ 1
+#define GET_FLOW_TABLE_CAP(dev, offset) \
+	((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) +	\
+			offset / 32)) >>					\
+	  (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
+static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
+{
+	int i;
+
+	for (i = 0; i < caps->arr_sz; i++) {
+		if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
+			return false;
+	}
+	return true;
+}
+
+static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
+				    struct init_tree_node *init_node,
+				    struct fs_node *fs_parent_node,
+				    struct init_tree_node *init_parent_node,
+				    int prio)
+{
+	int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
+					      flow_table_properties_nic_receive.
+					      max_ft_level);
+	struct mlx5_flow_namespace *fs_ns;
+	struct fs_prio *fs_prio;
+	struct fs_node *base;
+	int i;
+	int err;
+
+	if (init_node->type == FS_TYPE_PRIO) {
+		if ((init_node->min_ft_level > max_ft_level) ||
+		    !has_required_caps(steering->dev, &init_node->caps))
+			return 0;
+
+		fs_get_obj(fs_ns, fs_parent_node);
+		if (init_node->num_leaf_prios)
+			return create_leaf_prios(fs_ns, prio, init_node);
+		fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
+		if (IS_ERR(fs_prio))
+			return PTR_ERR(fs_prio);
+		base = &fs_prio->node;
+	} else if (init_node->type == FS_TYPE_NAMESPACE) {
+		fs_get_obj(fs_prio, fs_parent_node);
+		fs_ns = fs_create_namespace(fs_prio);
+		if (IS_ERR(fs_ns))
+			return PTR_ERR(fs_ns);
+		base = &fs_ns->node;
+	} else {
+		return -EINVAL;
+	}
+	prio = 0;
+	for (i = 0; i < init_node->ar_size; i++) {
+		err = init_root_tree_recursive(steering, &init_node->children[i],
+					       base, init_node, prio);
+		if (err)
+			return err;
+		if (init_node->children[i].type == FS_TYPE_PRIO &&
+		    init_node->children[i].num_leaf_prios) {
+			prio += init_node->children[i].num_leaf_prios;
+		}
+	}
+
+	return 0;
+}
+
+static int init_root_tree(struct mlx5_flow_steering *steering,
+			  struct init_tree_node *init_node,
+			  struct fs_node *fs_parent_node)
+{
+	int i;
+	struct mlx5_flow_namespace *fs_ns;
+	int err;
+
+	fs_get_obj(fs_ns, fs_parent_node);
+	for (i = 0; i < init_node->ar_size; i++) {
+		err = init_root_tree_recursive(steering, &init_node->children[i],
+					       &fs_ns->node,
+					       init_node, i);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
+						       enum fs_flow_table_type
+						       table_type)
+{
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_flow_namespace *ns;
+
+	/* Create the root namespace */
+	root_ns = mlx5_vzalloc(sizeof(*root_ns));
+	if (!root_ns)
+		return NULL;
+
+	root_ns->dev = steering->dev;
+	root_ns->table_type = table_type;
+
+	ns = &root_ns->ns;
+	fs_init_namespace(ns);
+	mutex_init(&root_ns->chain_lock);
+	tree_init_node(&ns->node, 1, NULL);
+	tree_add_node(&ns->node, NULL);
+
+	return root_ns;
+}
+
+static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
+
+static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
+{
+	struct fs_prio *prio;
+
+	fs_for_each_prio(prio, ns) {
+		 /* This updates prio start_level and num_levels */
+		set_prio_attrs_in_prio(prio, acc_level);
+		acc_level += prio->num_levels;
+	}
+	return acc_level;
+}
+
+static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
+{
+	struct mlx5_flow_namespace *ns;
+	int acc_level_ns = acc_level;
+
+	prio->start_level = acc_level;
+	fs_for_each_ns(ns, prio)
+		/* This updates start_level and num_levels of ns's priority descendants */
+		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
+	if (!prio->num_levels)
+		prio->num_levels = acc_level_ns - prio->start_level;
+	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
+}
+
+static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
+{
+	struct mlx5_flow_namespace *ns = &root_ns->ns;
+	struct fs_prio *prio;
+	int start_level = 0;
+
+	fs_for_each_prio(prio, ns) {
+		set_prio_attrs_in_prio(prio, start_level);
+		start_level += prio->num_levels;
+	}
+}
+
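+/* The anchor is a one-entry table created in the last (anchor) priority;
+ * it terminates the flow table chain.
+ */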
+#define ANCHOR_PRIO 0
+#define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
+static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
+{
+	struct mlx5_flow_namespace *ns = NULL;
+	struct mlx5_flow_table *ft;
+
+	ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+	if (!ns)
+		return -EINVAL;
+	ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
+	if (IS_ERR(ft)) {
+		mlx5_core_err(steering->dev, "Failed to create last anchor flow table\n");
+		return PTR_ERR(ft);
+	}
+	return 0;
+}
+
+static int init_root_ns(struct mlx5_flow_steering *steering)
+{
+	steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+	if (!steering->root_ns)
+		goto cleanup;
+
+	if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
+		goto cleanup;
+
+	set_prio_attrs(steering->root_ns);
+
+	if (create_anchor_flow_table(steering))
+		goto cleanup;
+
+	return 0;
+
+cleanup:
+	mlx5_cleanup_fs(steering->dev);
+	return -ENOMEM;
+}
+
+static void clean_tree(struct fs_node *node)
+{
+	if (node) {
+		struct fs_node *iter;
+		struct fs_node *temp;
+
+		list_for_each_entry_safe(iter, temp, &node->children, list)
+			clean_tree(iter);
+		tree_remove_node(node);
+	}
+}
+
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
+{
+	if (!root_ns)
+		return;
+
+	clean_tree(&root_ns->ns.node);
+}
+
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
+	cleanup_root_ns(steering->root_ns);
+	cleanup_root_ns(steering->esw_egress_root_ns);
+	cleanup_root_ns(steering->esw_ingress_root_ns);
+	cleanup_root_ns(steering->fdb_root_ns);
+	cleanup_root_ns(steering->sniffer_rx_root_ns);
+	cleanup_root_ns(steering->sniffer_tx_root_ns);
+	mlx5_cleanup_fc_stats(dev);
+	kfree(steering);
+}
+
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+	if (!steering->sniffer_tx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_tx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+	if (!steering->sniffer_rx_root_ns)
+		return -ENOMEM;
+
+	/* Create single prio */
+	prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+	if (IS_ERR(prio)) {
+		cleanup_root_ns(steering->sniffer_rx_root_ns);
+		return PTR_ERR(prio);
+	}
+	return 0;
+}
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+	if (!steering->fdb_root_ns)
+		return -ENOMEM;
+
+	prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
+	if (IS_ERR(prio))
+		goto out_err;
+
+	prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+	if (IS_ERR(prio))
+		goto out_err;
+
+	set_prio_attrs(steering->fdb_root_ns);
+	return 0;
+
+out_err:
+	cleanup_root_ns(steering->fdb_root_ns);
+	steering->fdb_root_ns = NULL;
+	return PTR_ERR(prio);
+}
+
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+	if (!steering->esw_egress_root_ns)
+		return -ENOMEM;
+
+	/* create 1 prio */
+	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
+			      MLX5_TOTAL_VPORTS(steering->dev));
+	return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *prio;
+
+	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+	if (!steering->esw_ingress_root_ns)
+		return -ENOMEM;
+
+	/* create 1 prio */
+	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
+			      MLX5_TOTAL_VPORTS(steering->dev));
+	return PTR_ERR_OR_ZERO(prio);
+}
+
+int mlx5_init_fs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering;
+	int err = 0;
+
+	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return 0;
+
+	err = mlx5_init_fc_stats(dev);
+	if (err)
+		return err;
+
+	steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+	if (!steering)
+		return -ENOMEM;
+	steering->dev = dev;
+	dev->priv.steering = steering;
+
+	if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+	    MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
+		err = init_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+			err = init_fdb_root_ns(steering);
+			if (err)
+				goto err;
+		}
+		if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+			err = init_egress_acl_root_ns(steering);
+			if (err)
+				goto err;
+		}
+		if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+			err = init_ingress_acl_root_ns(steering);
+			if (err)
+				goto err;
+		}
+	}
+
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+		err = init_sniffer_rx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+		err = init_sniffer_tx_root_ns(steering);
+		if (err)
+			goto err;
+	}
+
+	return 0;
+err:
+	mlx5_cleanup_fs(dev);
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/fs/core.h b/drivers/net/ethernet/mellanox/mlx5/fs/core.h
new file mode 100644
index 0000000..3c605bd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/fs/core.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CORE_
+#define _MLX5_FS_CORE_
+
+#include "lib/fs.h"
+
+enum fs_node_type {
+	FS_TYPE_NAMESPACE,
+	FS_TYPE_PRIO,
+	FS_TYPE_FLOW_TABLE,
+	FS_TYPE_FLOW_GROUP,
+	FS_TYPE_FLOW_ENTRY,
+	FS_TYPE_FLOW_DEST
+};
+
+enum fs_flow_table_type {
+	FS_FT_NIC_RX          = 0x0,
+	FS_FT_ESW_EGRESS_ACL  = 0x2,
+	FS_FT_ESW_INGRESS_ACL = 0x3,
+	FS_FT_FDB             = 0x4,
+	FS_FT_SNIFFER_RX      = 0x5,
+	FS_FT_SNIFFER_TX      = 0x6,
+};
+
+enum fs_flow_table_op_mod {
+	FS_FT_OP_MOD_NORMAL,
+	FS_FT_OP_MOD_LAG_DEMUX,
+};
+
+enum fs_fte_status {
+	FS_FTE_STATUS_EXISTING = 1UL << 0,
+};
+
+struct mlx5_flow_steering {
+	struct mlx5_core_dev *dev;
+	struct mlx5_flow_root_namespace *root_ns;
+	struct mlx5_flow_root_namespace *fdb_root_ns;
+	struct mlx5_flow_root_namespace *esw_egress_root_ns;
+	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
+	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
+};
+
+struct fs_node {
+	struct list_head	list;
+	struct list_head	children;
+	enum fs_node_type	type;
+	struct fs_node		*parent;
+	struct fs_node		*root;
+	/* lock the node for writing and traversing */
+	struct mutex		lock;
+	atomic_t		refcount;
+	void			(*remove_func)(struct fs_node *);
+};
+
+struct mlx5_flow_rule {
+	struct fs_node				node;
+	struct mlx5_flow_destination		dest_attr;
+	/* next_ft should be accessed under chain_lock and only if the
+	 * rule's sw_action is FWD_NEXT_PRIO.
+	 */
+	struct list_head			next_ft;
+	u32					sw_action;
+};
+
+struct mlx5_flow_handle {
+	int num_rules;
+	struct mlx5_flow_rule *rule[];
+};
+
+/* Type of children is mlx5_flow_group */
+struct mlx5_flow_table {
+	struct fs_node			node;
+	u32				id;
+	u16				vport;
+	unsigned int			max_fte;
+	unsigned int			level;
+	enum fs_flow_table_type		type;
+	enum fs_flow_table_op_mod	op_mod;
+	struct {
+		bool			active;
+		unsigned int		required_groups;
+		unsigned int		num_groups;
+	} autogroup;
+	/* Protect fwd_rules */
+	struct mutex			lock;
+	/* FWD rules that point on this flow table */
+	struct list_head		fwd_rules;
+	u32				flags;
+};
+
+struct mlx5_fc_cache {
+	u64 packets;
+	u64 bytes;
+	u64 lastuse;
+};
+
+struct mlx5_fc {
+	struct rb_node node;
+	struct list_head list;
+
+	/* last{packets,bytes} members are used when calculating the delta since
+	 * last reading
+	 */
+	u64 lastpackets;
+	u64 lastbytes;
+
+	u16 id;
+	bool deleted;
+	bool aging;
+
+	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
+/* Type of children is mlx5_flow_rule */
+struct fs_fte {
+	struct fs_node			node;
+	u32				val[MLX5_ST_SZ_DW(fte_match_param)];
+	u32				dests_size;
+	u32				flow_tag;
+	u32				index;
+	u32				action;
+	u32				encap_id;
+	enum fs_fte_status		status;
+	struct mlx5_fc			*counter;
+};
+
+/* Type of children is mlx5_flow_table/namespace */
+struct fs_prio {
+	struct fs_node			node;
+	unsigned int			num_levels;
+	unsigned int			start_level;
+	unsigned int			prio;
+	unsigned int			num_ft;
+};
+
+/* Type of children is fs_prio */
+struct mlx5_flow_namespace {
+	/* parent == NULL => root ns */
+	struct	fs_node			node;
+};
+
+struct mlx5_flow_group_mask {
+	u8	match_criteria_enable;
+	u32	match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+/* Type of children is fs_fte */
+struct mlx5_flow_group {
+	struct fs_node			node;
+	struct mlx5_flow_group_mask	mask;
+	u32				start_index;
+	u32				max_ftes;
+	u32				num_ftes;
+	u32				id;
+};
+
+struct mlx5_flow_root_namespace {
+	struct mlx5_flow_namespace	ns;
+	enum   fs_flow_table_type	table_type;
+	struct mlx5_core_dev		*dev;
+	struct mlx5_flow_table		*root_ft;
+	/* Should be held when chaining flow tables */
+	struct mutex			chain_lock;
+};
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
+int mlx5_init_fs(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
+
+#define fs_get_obj(v, _node)  {v = container_of((_node), typeof(*v), node); }
+
+#define fs_list_for_each_entry(pos, root)		\
+	list_for_each_entry(pos, root, node.list)
+
+#define fs_list_for_each_entry_safe(pos, tmp, root)		\
+	list_for_each_entry_safe(pos, tmp, root, node.list)
+
+#define fs_for_each_ns_or_ft_reverse(pos, prio)				\
+	list_for_each_entry_reverse(pos, &(prio)->node.children, list)
+
+#define fs_for_each_ns_or_ft(pos, prio)					\
+	list_for_each_entry(pos, (&(prio)->node.children), list)
+
+#define fs_for_each_prio(pos, ns)			\
+	fs_list_for_each_entry(pos, &(ns)->node.children)
+
+#define fs_for_each_ns(pos, prio)			\
+	fs_list_for_each_entry(pos, &(prio)->node.children)
+
+#define fs_for_each_ft(pos, prio)			\
+	fs_list_for_each_entry(pos, &(prio)->node.children)
+
+#define fs_for_each_ft_safe(pos, tmp, prio)			\
+	fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
+
+#define fs_for_each_fg(pos, ft)			\
+	fs_list_for_each_entry(pos, &(ft)->node.children)
+
+#define fs_for_each_fte(pos, fg)			\
+	fs_list_for_each_entry(pos, &(fg)->node.children)
+
+#define fs_for_each_dst(pos, fte)			\
+	fs_list_for_each_entry(pos, &(fte)->node.children)
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/fs/counters.c b/drivers/net/ethernet/mellanox/mlx5/fs/counters.c
new file mode 100644
index 0000000..19f4c97
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/fs/counters.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "lib/fs.h"
+#include <linux/rbtree.h>
+#include "mlx5.h"
+#include "fs/core.h"
+#include "fs/cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the responsibility of the user to prevent concurrent calls or bad
+ * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
+ * to struct mlx5_fc.
+ * e.g. en_tc.c is protected by its caller's RTNL lock and will never call a
+ * dump (access struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ *   - mlx5_fc_create() only adds to an addlist to be consumed by
+ *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
+ *   - queue the stats work so it picks up the new counter
+ *
+ * - destroy (user context)
+ *   - mark a counter as deleted
+ *   - queue the stats work to do the actual delete
+ *
+ * - dump (user context)
+ *   user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ *   destroy/dump - no conflict (see destroy)
+ *   query/dump - packets and bytes might be inconsistent (since update is not
+ *                atomic)
+ *   query/create - no conflict (see create)
+ *   every create/destroy queues the work, but the hardware is only
+ *   queried after the polling period has elapsed.
+ */
+
+static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+{
+	struct rb_node **new = &root->rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
+		int result = counter->id - this->id;
+
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&counter->node, parent, new);
+	rb_insert_color(&counter->node, root);
+}
+
+static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+					   struct mlx5_fc *first,
+					   u16 last_id)
+{
+	struct mlx5_cmd_fc_bulk *b;
+	struct rb_node *node = NULL;
+	u16 afirst_id;
+	int num;
+	int err;
+	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
+
+	/* first id must be aligned to 4 when using bulk query */
+	afirst_id = first->id & ~0x3;
+
+	/* number of counters to query inc. the last counter */
+	num = ALIGN(last_id - afirst_id + 1, 4);
+	if (num > max_bulk) {
+		num = max_bulk;
+		last_id = afirst_id + num - 1;
+	}
+
+	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
+	if (!b) {
+		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
+		return NULL;
+	}
+
+	err = mlx5_cmd_fc_bulk_query(dev, b);
+	if (err) {
+		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+		goto out;
+	}
+
+	for (node = &first->node; node; node = rb_next(node)) {
+		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+		struct mlx5_fc_cache *c = &counter->cache;
+		u64 packets;
+		u64 bytes;
+
+		if (counter->id > last_id)
+			break;
+
+		mlx5_cmd_fc_bulk_get(dev, b,
+				     counter->id, &packets, &bytes);
+
+		if (c->packets == packets)
+			continue;
+
+		c->packets = packets;
+		c->bytes = bytes;
+		c->lastuse = jiffies;
+	}
+
+out:
+	mlx5_cmd_fc_bulk_free(b);
+
+	return node;
+}
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+						 priv.fc_stats.work.work);
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	unsigned long now = jiffies;
+	struct mlx5_fc *counter = NULL;
+	struct mlx5_fc *last = NULL;
+	struct rb_node *node;
+	LIST_HEAD(tmplist);
+
+	spin_lock(&fc_stats->addlist_lock);
+
+	list_splice_tail_init(&fc_stats->addlist, &tmplist);
+
+	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+		queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+	spin_unlock(&fc_stats->addlist_lock);
+
+	list_for_each_entry(counter, &tmplist, list)
+		mlx5_fc_stats_insert(&fc_stats->counters, counter);
+
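+	/* First pass: reap counters marked deleted, track the last live one */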
+	node = rb_first(&fc_stats->counters);
+	while (node) {
+		counter = rb_entry(node, struct mlx5_fc, node);
+
+		node = rb_next(node);
+
+		if (counter->deleted) {
+			rb_erase(&counter->node, &fc_stats->counters);
+
+			mlx5_cmd_fc_free(dev, counter->id);
+
+			kfree(counter);
+			continue;
+		}
+
+		last = counter;
+	}
+
+	if (time_before(now, fc_stats->next_query) || !last)
+		return;
+
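+	/* Second pass: bulk-query all remaining counters up to the last id */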
+	node = rb_first(&fc_stats->counters);
+	while (node) {
+		counter = rb_entry(node, struct mlx5_fc, node);
+
+		node = mlx5_fc_stats_query(dev, counter, last->id);
+	}
+
+	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter;
+	int err;
+
+	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+	if (!counter)
+		return ERR_PTR(-ENOMEM);
+
+	err = mlx5_cmd_fc_alloc(dev, &counter->id);
+	if (err)
+		goto err_out;
+
+	if (aging) {
+		counter->cache.lastuse = jiffies;
+		counter->aging = true;
+
+		spin_lock(&fc_stats->addlist_lock);
+		list_add(&counter->list, &fc_stats->addlist);
+		spin_unlock(&fc_stats->addlist_lock);
+
+		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+	}
+
+	return counter;
+
+err_out:
+	kfree(counter);
+
+	return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+	if (!counter)
+		return;
+
+	if (counter->aging) {
+		counter->deleted = true;
+		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+		return;
+	}
+
+	mlx5_cmd_fc_free(dev, counter->id);
+	kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+	fc_stats->counters = RB_ROOT;
+	INIT_LIST_HEAD(&fc_stats->addlist);
+	spin_lock_init(&fc_stats->addlist_lock);
+
+	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+	if (!fc_stats->wq)
+		return -ENOMEM;
+
+	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+	return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter;
+	struct mlx5_fc *tmp;
+	struct rb_node *node;
+
+	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+	destroy_workqueue(dev->priv.fc_stats.wq);
+	dev->priv.fc_stats.wq = NULL;
+
+	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
+		list_del(&counter->list);
+
+		mlx5_cmd_fc_free(dev, counter->id);
+
+		kfree(counter);
+	}
+
+	node = rb_first(&fc_stats->counters);
+	while (node) {
+		counter = rb_entry(node, struct mlx5_fc, node);
+
+		node = rb_next(node);
+
+		rb_erase(&counter->node, &fc_stats->counters);
+
+		mlx5_cmd_fc_free(dev, counter->id);
+
+		kfree(counter);
+	}
+}
+
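+/* Return the packet/byte delta since the previous call; the cache itself
+ * is refreshed by the periodic stats work.
+ */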
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+			  u64 *bytes, u64 *packets, u64 *lastuse)
+{
+	struct mlx5_fc_cache c;
+
+	c = counter->cache;
+
+	*bytes = c.bytes - counter->lastbytes;
+	*packets = c.packets - counter->lastpackets;
+	*lastuse = c.lastuse;
+
+	counter->lastbytes = c.bytes;
+	counter->lastpackets = c.packets;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/health.c b/drivers/net/ethernet/mellanox/mlx5/health.c
new file mode 100644
index 0000000..7af5593
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/health.c
@@ -0,0 +1,386 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include <linux/hardirq.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
+enum {
+	MLX5_HEALTH_POLL_INTERVAL	= 2 * HZ,
+	MAX_MISSES			= 3,
+};
+
+enum {
+	MLX5_HEALTH_SYNDR_FW_ERR		= 0x1,
+	MLX5_HEALTH_SYNDR_IRISC_ERR		= 0x7,
+	MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR	= 0x8,
+	MLX5_HEALTH_SYNDR_CRC_ERR		= 0x9,
+	MLX5_HEALTH_SYNDR_FETCH_PCI_ERR		= 0xa,
+	MLX5_HEALTH_SYNDR_HW_FTL_ERR		= 0xb,
+	MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR	= 0xc,
+	MLX5_HEALTH_SYNDR_EQ_ERR		= 0xd,
+	MLX5_HEALTH_SYNDR_EQ_INV		= 0xe,
+	MLX5_HEALTH_SYNDR_FFSER_ERR		= 0xf,
+	MLX5_HEALTH_SYNDR_HIGH_TEMP		= 0x10
+};
+
+enum {
+	MLX5_NIC_IFC_FULL		= 0,
+	MLX5_NIC_IFC_DISABLED		= 1,
+	MLX5_NIC_IFC_NO_DRAM_NIC	= 2,
+	MLX5_NIC_IFC_INVALID		= 3
+};
+
+enum {
+	MLX5_DROP_NEW_HEALTH_WORK,
+};
+
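+/* The NIC interface state is reported in bits 9:8 of cmdq_addr_l_sz */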
+static u8 get_nic_state(struct mlx5_core_dev *dev)
+{
+	return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3;
+}
+
+static void trigger_cmd_completions(struct mlx5_core_dev *dev)
+{
+	unsigned long flags;
+	u64 vector;
+
+	/* wait for pending handlers to complete */
+	synchronize_irq(dev->priv.msix_arr[MLX5_EQ_VEC_CMD].vector);
+	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
+	if (!vector)
+		goto no_trig;
+
+	vector |= MLX5_TRIGGERED_CMD_COMP;
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+
+	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
+	mlx5_cmd_comp_handler(dev, vector);
+	return;
+
+no_trig:
+	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
+}
+
+static int in_fatal(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	struct health_buffer __iomem *h = health->health;
+
+	if (get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+		return 1;
+
+	if (ioread32be(&h->fw_ver) == 0xffffffff)
+		return 1;
+
+	return 0;
+}
+
+void mlx5_enter_error_state(struct mlx5_core_dev *dev)
+{
+	mutex_lock(&dev->intf_state_mutex);
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		goto unlock;
+
+	mlx5_core_err(dev, "start\n");
+	if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
+		dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+		trigger_cmd_completions(dev);
+	}
+
+	mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
+	mlx5_core_err(dev, "end\n");
+
+unlock:
+	mutex_unlock(&dev->intf_state_mutex);
+}
+
+static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
+{
+	u8 nic_interface = get_nic_state(dev);
+
+	switch (nic_interface) {
+	case MLX5_NIC_IFC_FULL:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
+		break;
+
+	case MLX5_NIC_IFC_DISABLED:
+		mlx5_core_warn(dev, "starting teardown\n");
+		break;
+
+	case MLX5_NIC_IFC_NO_DRAM_NIC:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
+		break;
+	default:
+		mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n",
+			       nic_interface);
+	}
+
+	mlx5_disable_device(dev);
+}
+
+static void health_recover(struct work_struct *work)
+{
+	struct mlx5_core_health *health;
+	struct delayed_work *dwork;
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	u8 nic_state;
+
+	dwork = container_of(work, struct delayed_work, work);
+	health = container_of(dwork, struct mlx5_core_health, recover_work);
+	priv = container_of(health, struct mlx5_priv, health);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	nic_state = get_nic_state(dev);
+	if (nic_state == MLX5_NIC_IFC_INVALID) {
+		dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n");
+		return;
+	}
+
+	dev_err(&dev->pdev->dev, "starting health recovery flow\n");
+	mlx5_recover_device(dev);
+}
+
+/* How long to wait before the health recovery flow resets the driver (in msecs) */
+#define MLX5_RECOVERY_DELAY_MSECS 60000
+static void health_care(struct work_struct *work)
+{
+	unsigned long recover_delay = msecs_to_jiffies(MLX5_RECOVERY_DELAY_MSECS);
+	struct mlx5_core_health *health;
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+
+	health = container_of(work, struct mlx5_core_health, work);
+	priv = container_of(health, struct mlx5_priv, health);
+	dev = container_of(priv, struct mlx5_core_dev, priv);
+	mlx5_core_warn(dev, "handling bad device here\n");
+	mlx5_handle_bad_state(dev);
+
+	spin_lock(&health->wq_lock);
+	if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+		schedule_delayed_work(&health->recover_work, recover_delay);
+	else
+		dev_err(&dev->pdev->dev,
+			"new health works are not permitted at this stage\n");
+	spin_unlock(&health->wq_lock);
+}
+
+static const char *hsynd_str(u8 synd)
+{
+	switch (synd) {
+	case MLX5_HEALTH_SYNDR_FW_ERR:
+		return "firmware internal error";
+	case MLX5_HEALTH_SYNDR_IRISC_ERR:
+		return "irisc not responding";
+	case MLX5_HEALTH_SYNDR_HW_UNRECOVERABLE_ERR:
+		return "unrecoverable hardware error";
+	case MLX5_HEALTH_SYNDR_CRC_ERR:
+		return "firmware CRC error";
+	case MLX5_HEALTH_SYNDR_FETCH_PCI_ERR:
+		return "ICM fetch PCI error";
+	case MLX5_HEALTH_SYNDR_HW_FTL_ERR:
+		return "HW fatal error\n";
+	case MLX5_HEALTH_SYNDR_ASYNC_EQ_OVERRUN_ERR:
+		return "async EQ buffer overrun";
+	case MLX5_HEALTH_SYNDR_EQ_ERR:
+		return "EQ error";
+	case MLX5_HEALTH_SYNDR_EQ_INV:
+		return "Invalid EQ referenced";
+	case MLX5_HEALTH_SYNDR_FFSER_ERR:
+		return "FFSER error";
+	case MLX5_HEALTH_SYNDR_HIGH_TEMP:
+		return "High temperature";
+	default:
+		return "unrecognized error";
+	}
+}
+
+static u16 get_maj(u32 fw)
+{
+	return fw >> 28;
+}
+
+static u16 get_min(u32 fw)
+{
+	return fw >> 16 & 0xfff;
+}
+
+static u16 get_sub(u32 fw)
+{
+	return fw & 0xffff;
+}
+
+static void print_health_info(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+	struct health_buffer __iomem *h = health->health;
+	char fw_str[18];
+	u32 fw;
+	int i;
+
+	/* If the syndrome is 0 the device is OK, no need to print the buffer */
+	if (!ioread8(&h->synd))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
+		dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i));
+
+	dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+	dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+	fw = ioread32be(&h->fw_ver);
+	sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+	dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
+	dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+	dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
+	dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
+	dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+}
+
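+/* Poll every MLX5_HEALTH_POLL_INTERVAL, jittered by up to one second so
+ * that multiple devices do not poll in lockstep.
+ */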
+static unsigned long get_next_poll_jiffies(void)
+{
+	unsigned long next;
+
+	get_random_bytes(&next, sizeof(next));
+	next %= HZ;
+	next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+
+	return next;
+}
+
+static void poll_health(unsigned long data)
+{
+	struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
+	struct mlx5_core_health *health = &dev->priv.health;
+	u32 count;
+
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		mod_timer(&health->timer, get_next_poll_jiffies());
+		return;
+	}
+
+	count = ioread32be(health->health_counter);
+	if (count == health->prev)
+		++health->miss_counter;
+	else
+		health->miss_counter = 0;
+
+	health->prev = count;
+	if (health->miss_counter == MAX_MISSES) {
+		dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n");
+		print_health_info(dev);
+	} else {
+		mod_timer(&health->timer, get_next_poll_jiffies());
+	}
+
+	if (in_fatal(dev) && !health->sick) {
+		health->sick = true;
+		print_health_info(dev);
+		spin_lock(&health->wq_lock);
+		if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
+			queue_work(health->wq, &health->work);
+		else
+			dev_err(&dev->pdev->dev,
+				"new health works are not permitted at this stage\n");
+		spin_unlock(&health->wq_lock);
+	}
+}
+
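+/* Lifecycle, as suggested by the helpers below: mlx5_health_init() then
+ * mlx5_start_health_poll(); on teardown, mlx5_drain_health_wq(),
+ * mlx5_stop_health_poll() and finally mlx5_health_cleanup().
+ */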
+void mlx5_start_health_poll(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	init_timer(&health->timer);
+	health->sick = 0;
+	clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	health->health = &dev->iseg->health;
+	health->health_counter = &dev->iseg->health_counter;
+
+	health->timer.data = (unsigned long)dev;
+	health->timer.function = poll_health;
+	health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
+	add_timer(&health->timer);
+}
+
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	del_timer_sync(&health->timer);
+}
+
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	spin_lock(&health->wq_lock);
+	set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+	spin_unlock(&health->wq_lock);
+	cancel_delayed_work_sync(&health->recover_work);
+	cancel_work_sync(&health->work);
+}
+
+void mlx5_health_cleanup(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health = &dev->priv.health;
+
+	destroy_workqueue(health->wq);
+}
+
+int mlx5_health_init(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_health *health;
+	char *name;
+
+	health = &dev->priv.health;
+	name = kmalloc(64, GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	/* Bound the copy: strcpy()+strcat() into the fixed 64-byte buffer
+	 * could overflow for long device names.
+	 */
+	snprintf(name, 64, "mlx5_health%s", dev_name(&dev->pdev->dev));
+	health->wq = create_singlethread_workqueue(name);
+	kfree(name);
+	if (!health->wq)
+		return -ENOMEM;
+	spin_lock_init(&health->wq_lock);
+	INIT_WORK(&health->work, health_care);
+	INIT_DELAYED_WORK(&health->recover_work, health_recover);
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lag.c b/drivers/net/ethernet/mellanox/mlx5/lag.c
new file mode 100644
index 0000000..c0338a5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lag.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/netdevice.h>
+#include "lib/driver.h"
+#include "lib/vport.h"
+#include "mlx5.h"
+
+enum {
+	MLX5_LAG_FLAG_BONDED = 1 << 0,
+};
+
+struct lag_func {
+	struct mlx5_core_dev *dev;
+	struct net_device    *netdev;
+};
+
+/* Used for collection of netdev event info. */
+struct lag_tracker {
+	enum   netdev_lag_tx_type           tx_type;
+	struct netdev_lag_lower_state_info  netdev_state[MLX5_MAX_PORTS];
+	bool is_bonded;
+};
+
+/* LAG data of a ConnectX card.
+ * It serves both of its physical functions (PFs).
+ */
+struct mlx5_lag {
+	u8                        flags;
+	u8                        v2p_map[MLX5_MAX_PORTS];
+	struct lag_func           pf[MLX5_MAX_PORTS];
+	struct lag_tracker        tracker;
+	struct delayed_work       bond_work;
+	struct notifier_block     nb;
+};
+
+/* General purpose lock; hold it only for short periods.
+ * Beware of lock dependencies: preferably, acquire no other locks
+ * while holding it.
+ */
+static DEFINE_MUTEX(lag_mutex);
+
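+/* Thin wrappers around the CREATE/MODIFY/DESTROY_LAG firmware commands;
+ * the lag context carries the TX port-affinity mapping for the two
+ * virtual ports.
+ */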
+static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+			       u8 remap_port2)
+{
+	u32   in[MLX5_ST_SZ_DW(create_lag_in)]   = {0};
+	u32   out[MLX5_ST_SZ_DW(create_lag_out)] = {0};
+	void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
+
+	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
+
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_modify_lag(struct mlx5_core_dev *dev, u8 remap_port1,
+			       u8 remap_port2)
+{
+	u32   in[MLX5_ST_SZ_DW(modify_lag_in)]   = {0};
+	u32   out[MLX5_ST_SZ_DW(modify_lag_out)] = {0};
+	void *lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx);
+
+	MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG);
+	MLX5_SET(modify_lag_in, in, field_select, 0x1);
+
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+	MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_destroy_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(destroy_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_lag_out)] = {0};
+
+	MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(create_vport_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(create_vport_lag_out)] = {0};
+
+	MLX5_SET(create_vport_lag_in, in, opcode, MLX5_CMD_OP_CREATE_VPORT_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_create_vport_lag);
+
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev)
+{
+	u32  in[MLX5_ST_SZ_DW(destroy_vport_lag_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_vport_lag_out)] = {0};
+
+	MLX5_SET(destroy_vport_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_VPORT_LAG);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_destroy_vport_lag);
+
+static struct mlx5_lag *mlx5_lag_dev_get(struct mlx5_core_dev *dev)
+{
+	return dev->priv.lag;
+}
+
+static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
+				       struct net_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].netdev == ndev)
+			return i;
+
+	return -1;
+}
+
+static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
+{
+	return !!(ldev->flags & MLX5_LAG_FLAG_BONDED);
+}
+
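+/* Choose the physical port behind each virtual port: in active-backup
+ * mode both map to whichever port currently has TX enabled; otherwise
+ * the mapping is 1:1, falling back to the peer port when a link is down.
+ */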
+static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
+					   u8 *port1, u8 *port2)
+{
+	if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+		if (tracker->netdev_state[0].tx_enabled) {
+			*port1 = 1;
+			*port2 = 1;
+		} else {
+			*port1 = 2;
+			*port2 = 2;
+		}
+	} else {
+		*port1 = 1;
+		*port2 = 2;
+		if (!tracker->netdev_state[0].link_up)
+			*port1 = 2;
+		else if (!tracker->netdev_state[1].link_up)
+			*port2 = 1;
+	}
+}
+
+static void mlx5_activate_lag(struct mlx5_lag *ldev,
+			      struct lag_tracker *tracker)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
+
+	ldev->flags |= MLX5_LAG_FLAG_BONDED;
+
+	mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0],
+				       &ldev->v2p_map[1]);
+
+	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]);
+	if (err)
+		mlx5_core_err(dev0,
+			      "Failed to create LAG (%d)\n",
+			      err);
+}
+
+static void mlx5_deactivate_lag(struct mlx5_lag *ldev)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	int err;
+
+	ldev->flags &= ~MLX5_LAG_FLAG_BONDED;
+
+	err = mlx5_cmd_destroy_lag(dev0);
+	if (err)
+		mlx5_core_err(dev0,
+			      "Failed to destroy LAG (%d)\n",
+			      err);
+}
+
+static void mlx5_do_bond(struct mlx5_lag *ldev)
+{
+	struct mlx5_core_dev *dev0 = ldev->pf[0].dev;
+	struct mlx5_core_dev *dev1 = ldev->pf[1].dev;
+	struct lag_tracker tracker;
+	u8 v2p_port1, v2p_port2;
+	int i, err;
+
+	if (!dev0 || !dev1)
+		return;
+
+	mutex_lock(&lag_mutex);
+	tracker = ldev->tracker;
+	mutex_unlock(&lag_mutex);
+
+	if (tracker.is_bonded && !mlx5_lag_is_bonded(ldev)) {
+		if (mlx5_sriov_is_enabled(dev0) ||
+		    mlx5_sriov_is_enabled(dev1)) {
+			mlx5_core_warn(dev0, "LAG is not supported with SRIOV");
+			return;
+		}
+
+		for (i = 0; i < MLX5_MAX_PORTS; i++)
+			mlx5_remove_dev_by_protocol(ldev->pf[i].dev,
+						    MLX5_INTERFACE_PROTOCOL_IB);
+
+		mlx5_activate_lag(ldev, &tracker);
+
+		mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+		mlx5_nic_vport_enable_roce(dev1);
+	} else if (tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+		mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1,
+					       &v2p_port2);
+
+		if ((v2p_port1 != ldev->v2p_map[0]) ||
+		    (v2p_port2 != ldev->v2p_map[1])) {
+			ldev->v2p_map[0] = v2p_port1;
+			ldev->v2p_map[1] = v2p_port2;
+
+			err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+			if (err)
+				mlx5_core_err(dev0,
+					      "Failed to modify LAG (%d)\n",
+					      err);
+		}
+	} else if (!tracker.is_bonded && mlx5_lag_is_bonded(ldev)) {
+		mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB);
+		mlx5_nic_vport_disable_roce(dev1);
+
+		mlx5_deactivate_lag(ldev);
+
+		for (i = 0; i < MLX5_MAX_PORTS; i++)
+			if (ldev->pf[i].dev)
+				mlx5_add_dev_by_protocol(ldev->pf[i].dev,
+							 MLX5_INTERFACE_PROTOCOL_IB);
+	}
+}
+
+static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay)
+{
+	schedule_delayed_work(&ldev->bond_work, delay);
+}
+
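+/* Bond reconfiguration runs from delayed work; the device list is taken
+ * with a trylock and the work re-queued on contention, presumably to
+ * avoid blocking while another path holds the list lock.
+ */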
+static void mlx5_do_bond_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag,
+					     bond_work);
+	int status;
+
+	status = mlx5_dev_list_trylock();
+	if (!status) {
+		/* 1 sec delay. */
+		mlx5_queue_bond_work(ldev, HZ);
+		return;
+	}
+
+	mlx5_do_bond(ldev);
+	mlx5_dev_list_unlock();
+}
+
+static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
+					 struct lag_tracker *tracker,
+					 struct net_device *ndev,
+					 struct netdev_notifier_changeupper_info *info)
+{
+	struct net_device *upper = info->upper_dev, *ndev_tmp;
+	struct netdev_lag_upper_info *lag_upper_info;
+	bool is_bonded;
+	int bond_status = 0;
+	int num_slaves = 0;
+	int idx;
+
+	if (!netif_is_lag_master(upper))
+		return 0;
+
+	lag_upper_info = info->upper_info;
+
+	/* The event may still be of interest if the slave does not belong to
+	 * us, but is enslaved to a master which has one or more of our netdevs
+	 * as slaves (e.g., if a new slave is added to a master that bonds two
+	 * of our netdevs, we should unbond).
+	 */
+	rcu_read_lock();
+	for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
+		idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
+		if (idx > -1)
+			bond_status |= (1 << idx);
+
+		num_slaves++;
+	}
+	rcu_read_unlock();
+
+	/* None of this lagdev's netdevs are slaves of this master. */
+	if (!(bond_status & 0x3))
+		return 0;
+
+	if (lag_upper_info)
+		tracker->tx_type = lag_upper_info->tx_type;
+
+	/* Determine bonding status:
+	 * A device is considered bonded if both of its physical ports are
+	 * slaves of the same LAG master, and that master has no other slaves.
+	 * The LAG TX type must be active-backup or hash.
+	 */
+	is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
+		    (bond_status == 0x3) &&
+		    ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
+		     (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+
+	if (tracker->is_bonded != is_bonded) {
+		tracker->is_bonded = is_bonded;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
+					      struct lag_tracker *tracker,
+					      struct net_device *ndev,
+					      struct netdev_notifier_changelowerstate_info *info)
+{
+	struct netdev_lag_lower_state_info *lag_lower_info;
+	int idx;
+
+	if (!netif_is_lag_port(ndev))
+		return 0;
+
+	idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
+	if (idx == -1)
+		return 0;
+
+	/* This information is used to determine virtual to physical
+	 * port mapping.
+	 */
+	lag_lower_info = info->lower_state_info;
+	if (!lag_lower_info)
+		return 0;
+
+	tracker->netdev_state[idx] = *lag_lower_info;
+
+	return 1;
+}
+
+static int mlx5_lag_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct lag_tracker tracker;
+	struct mlx5_lag *ldev;
+	int changed = 0;
+
+	if (!net_eq(dev_net(ndev), &init_net))
+		return NOTIFY_DONE;
+
+	if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
+		return NOTIFY_DONE;
+
+	ldev    = container_of(this, struct mlx5_lag, nb);
+	tracker = ldev->tracker;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		changed = mlx5_handle_changeupper_event(ldev, &tracker, ndev,
+							ptr);
+		break;
+	case NETDEV_CHANGELOWERSTATE:
+		changed = mlx5_handle_changelowerstate_event(ldev, &tracker,
+							     ndev, ptr);
+		break;
+	}
+
+	mutex_lock(&lag_mutex);
+	ldev->tracker = tracker;
+	mutex_unlock(&lag_mutex);
+
+	if (changed)
+		mlx5_queue_bond_work(ldev, 0);
+
+	return NOTIFY_DONE;
+}
+
+static struct mlx5_lag *mlx5_lag_dev_alloc(void)
+{
+	struct mlx5_lag *ldev;
+
+	ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
+	if (!ldev)
+		return NULL;
+
+	INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work);
+
+	return ldev;
+}
+
+static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
+{
+	kfree(ldev);
+}
+
+static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+				struct mlx5_core_dev *dev,
+				struct net_device *netdev)
+{
+	unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+
+	if (fn >= MLX5_MAX_PORTS)
+		return;
+
+	mutex_lock(&lag_mutex);
+	ldev->pf[fn].dev    = dev;
+	ldev->pf[fn].netdev = netdev;
+	ldev->tracker.netdev_state[fn].link_up = 0;
+	ldev->tracker.netdev_state[fn].tx_enabled = 0;
+
+	dev->priv.lag = ldev;
+	mutex_unlock(&lag_mutex);
+}
+
+static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
+				   struct mlx5_core_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].dev == dev)
+			break;
+
+	if (i == MLX5_MAX_PORTS)
+		return;
+
+	mutex_lock(&lag_mutex);
+	memset(&ldev->pf[i], 0, sizeof(*ldev->pf));
+
+	dev->priv.lag = NULL;
+	mutex_unlock(&lag_mutex);
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+	struct mlx5_lag *ldev = NULL;
+	struct mlx5_core_dev *tmp_dev;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+		return;
+
+	tmp_dev = mlx5_get_next_phys_dev(dev);
+	if (tmp_dev)
+		ldev = tmp_dev->priv.lag;
+
+	if (!ldev) {
+		ldev = mlx5_lag_dev_alloc();
+		if (!ldev) {
+			mlx5_core_err(dev, "Failed to alloc lag dev\n");
+			return;
+		}
+	}
+
+	mlx5_lag_dev_add_pf(ldev, dev, netdev);
+
+	if (!ldev->nb.notifier_call) {
+		ldev->nb.notifier_call = mlx5_lag_netdev_event;
+		if (register_netdevice_notifier(&ldev->nb)) {
+			ldev->nb.notifier_call = NULL;
+			mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
+		}
+	}
+}
+
+/* Must be called with intf_mutex held */
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev;
+	int i;
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev)
+		return;
+
+	if (mlx5_lag_is_bonded(ldev))
+		mlx5_deactivate_lag(ldev);
+
+	mlx5_lag_dev_remove_pf(ldev, dev);
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (ldev->pf[i].dev)
+			break;
+
+	if (i == MLX5_MAX_PORTS) {
+		if (ldev->nb.notifier_call)
+			unregister_netdevice_notifier(&ldev->nb);
+		cancel_delayed_work_sync(&ldev->bond_work);
+		mlx5_lag_dev_free(ldev);
+	}
+}
+
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev)
+{
+	struct mlx5_lag *ldev;
+	bool res;
+
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+	res  = ldev && mlx5_lag_is_bonded(ldev);
+	mutex_unlock(&lag_mutex);
+
+	return res;
+}
+EXPORT_SYMBOL(mlx5_lag_is_active);
+
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev)
+{
+	struct net_device *ndev = NULL;
+	struct mlx5_lag *ldev;
+
+	mutex_lock(&lag_mutex);
+	ldev = mlx5_lag_dev_get(dev);
+
+	if (!(ldev && mlx5_lag_is_bonded(ldev)))
+		goto unlock;
+
+	if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
+		ndev = ldev->tracker.netdev_state[0].tx_enabled ?
+		       ldev->pf[0].netdev : ldev->pf[1].netdev;
+	} else {
+		ndev = ldev->pf[0].netdev;
+	}
+	if (ndev)
+		dev_hold(ndev);
+
+unlock:
+	mutex_unlock(&lag_mutex);
+
+	return ndev;
+}
+EXPORT_SYMBOL(mlx5_lag_get_roce_netdev);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev,
+						 priv);
+	struct mlx5_lag *ldev;
+
+	if (intf->protocol != MLX5_INTERFACE_PROTOCOL_IB)
+		return true;
+
+	ldev = mlx5_lag_dev_get(dev);
+	if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev)
+		return true;
+
+	/* If bonded, we do not add an IB device for PF1. */
+	return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/alloc.c b/drivers/net/ethernet/mellanox/mlx5/lib/alloc.c
new file mode 100644
index 0000000..2f3bbff
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/alloc.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/export.h>
+#include <linux/bitmap.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include "lib/driver.h"
+#include "mlx5.h"
+
+struct mlx5_db_pgdir {
+	struct list_head	list;
+	unsigned long	       *bitmap;
+	__be32		       *db_page;
+	dma_addr_t		db_dma;
+};
+
+/* Handling for queue buffers -- we allocate DMA-coherent memory and
+ * register it in a memory region at HCA virtual address 0.
+ */
+
+static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
+					   size_t size, dma_addr_t *dma_handle,
+					   int node)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	int original_node;
+	void *cpu_handle;
+
+	mutex_lock(&priv->alloc_mutex);
+	original_node = dev_to_node(&dev->pdev->dev);
+	set_dev_node(&dev->pdev->dev, node);
+	cpu_handle = dma_zalloc_coherent(&dev->pdev->dev, size,
+					 dma_handle, GFP_KERNEL);
+	set_dev_node(&dev->pdev->dev, original_node);
+	mutex_unlock(&priv->alloc_mutex);
+	return cpu_handle;
+}
+
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node)
+{
+	dma_addr_t t;
+
+	buf->size = size;
+	buf->npages       = 1;
+	buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
+	buf->direct.buf   = mlx5_dma_zalloc_coherent_node(dev, size,
+							  &t, node);
+	if (!buf->direct.buf)
+		return -ENOMEM;
+
+	buf->direct.map = t;
+
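+	/* If the DMA address is not aligned to the allocation order,
+	 * describe the buffer as more, smaller pages.
+	 */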
+	while (t & ((1 << buf->page_shift) - 1)) {
+		--buf->page_shift;
+		buf->npages *= 2;
+	}
+
+	return 0;
+}
+
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+{
+	return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
+}
+EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
+
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+{
+	dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+			  buf->direct.map);
+}
+EXPORT_SYMBOL_GPL(mlx5_buf_free);
+
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			     struct mlx5_frag_buf *buf, int node)
+{
+	int i;
+
+	buf->size = size;
+	buf->npages = 1 << get_order(size);
+	buf->page_shift = PAGE_SHIFT;
+	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
+			     GFP_KERNEL);
+	if (!buf->frags)
+		goto err_out;
+
+	for (i = 0; i < buf->npages; i++) {
+		struct mlx5_buf_list *frag = &buf->frags[i];
+		int frag_sz = min_t(int, size, PAGE_SIZE);
+
+		frag->buf = mlx5_dma_zalloc_coherent_node(dev, frag_sz,
+							  &frag->map, node);
+		if (!frag->buf)
+			goto err_free_buf;
+		if (frag->map & ((1 << buf->page_shift) - 1)) {
+			dma_free_coherent(&dev->pdev->dev, frag_sz,
+					  buf->frags[i].buf, buf->frags[i].map);
+			mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
+				       &frag->map, buf->page_shift);
+			goto err_free_buf;
+		}
+		size -= frag_sz;
+	}
+
+	return 0;
+
+err_free_buf:
+	while (i--)
+		dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, buf->frags[i].buf,
+				  buf->frags[i].map);
+	kfree(buf->frags);
+err_out:
+	return -ENOMEM;
+}
+
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
+{
+	int size = buf->size;
+	int i;
+
+	for (i = 0; i < buf->npages; i++) {
+		int frag_sz = min_t(int, size, PAGE_SIZE);
+
+		dma_free_coherent(&dev->pdev->dev, frag_sz, buf->frags[i].buf,
+				  buf->frags[i].map);
+		size -= frag_sz;
+	}
+	kfree(buf->frags);
+}
+
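+/* Doorbell records are carved out of dedicated DMA pages, one record per
+ * cache line; pgdir->bitmap tracks which slots of the page are free.
+ */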
+static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
+						 int node)
+{
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
+	struct mlx5_db_pgdir *pgdir;
+
+	pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
+	if (!pgdir)
+		return NULL;
+
+	pgdir->bitmap = kcalloc(BITS_TO_LONGS(db_per_page),
+				sizeof(unsigned long),
+				GFP_KERNEL);
+
+	if (!pgdir->bitmap) {
+		kfree(pgdir);
+		return NULL;
+	}
+
+	bitmap_fill(pgdir->bitmap, db_per_page);
+
+	pgdir->db_page = mlx5_dma_zalloc_coherent_node(dev, PAGE_SIZE,
+						       &pgdir->db_dma, node);
+	if (!pgdir->db_page) {
+		kfree(pgdir->bitmap);
+		kfree(pgdir);
+		return NULL;
+	}
+
+	return pgdir;
+}
+
+static int mlx5_alloc_db_from_pgdir(struct mlx5_db_pgdir *pgdir,
+				    struct mlx5_db *db)
+{
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
+	int offset;
+	int i;
+
+	i = find_first_bit(pgdir->bitmap, db_per_page);
+	if (i >= db_per_page)
+		return -ENOMEM;
+
+	__clear_bit(i, pgdir->bitmap);
+
+	db->u.pgdir = pgdir;
+	db->index   = i;
+	offset = db->index * cache_line_size();
+	db->db      = pgdir->db_page + offset / sizeof(*pgdir->db_page);
+	db->dma     = pgdir->db_dma  + offset;
+
+	db->db[0] = 0;
+	db->db[1] = 0;
+
+	return 0;
+}
+
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db, int node)
+{
+	struct mlx5_db_pgdir *pgdir;
+	int ret = 0;
+
+	mutex_lock(&dev->priv.pgdir_mutex);
+
+	list_for_each_entry(pgdir, &dev->priv.pgdir_list, list)
+		if (!mlx5_alloc_db_from_pgdir(pgdir, db))
+			goto out;
+
+	pgdir = mlx5_alloc_db_pgdir(dev, node);
+	if (!pgdir) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	list_add(&pgdir->list, &dev->priv.pgdir_list);
+
+	/* This should never fail -- we just allocated an empty page: */
+	WARN_ON(mlx5_alloc_db_from_pgdir(pgdir, db));
+
+out:
+	mutex_unlock(&dev->priv.pgdir_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc_node);
+
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	return mlx5_db_alloc_node(dev, db, dev->priv.numa_node);
+}
+EXPORT_SYMBOL_GPL(mlx5_db_alloc);
+
+void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
+{
+	u32 db_per_page = PAGE_SIZE / cache_line_size();
+
+	mutex_lock(&dev->priv.pgdir_mutex);
+
+	__set_bit(db->index, db->u.pgdir->bitmap);
+
+	if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
+		dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+				  db->u.pgdir->db_page, db->u.pgdir->db_dma);
+		list_del(&db->u.pgdir->list);
+		kfree(db->u.pgdir->bitmap);
+		kfree(db->u.pgdir);
+	}
+
+	mutex_unlock(&dev->priv.pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx5_db_free);
+
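+/* Fill 'pas' with the big-endian address of each page of the buffer, in
+ * the form expected by firmware commands that take page lists.
+ */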
+void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+{
+	u64 addr;
+	int i;
+
+	for (i = 0; i < buf->npages; i++) {
+		addr = buf->direct.map + (i << buf->page_shift);
+
+		pas[i] = cpu_to_be64(addr);
+	}
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_array);
+
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *buf, __be64 *pas)
+{
+	int i;
+
+	for (i = 0; i < buf->npages; i++)
+		pas[i] = cpu_to_be64(buf->frags[i].map);
+}
+EXPORT_SYMBOL_GPL(mlx5_fill_page_frag_array);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/cmd.h b/drivers/net/ethernet/mellanox/mlx5/lib/cmd.h
new file mode 100644
index 0000000..68cd08f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/cmd.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_CMD_H
+#define MLX5_CMD_H
+
+#include <linux/types.h>
+
+struct manage_pages_layout {
+	u64	ptr;
+	u32	reserved;
+	u16	num_entries;
+	u16	func_id;
+};
+
+struct mlx5_cmd_alloc_uar_imm_out {
+	u32	rsvd[3];
+	u32	uarn;
+};
+
+#endif /* MLX5_CMD_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/cq.c b/drivers/net/ethernet/mellanox/mlx5/lib/cq.c
new file mode 100644
index 0000000..1c2e812
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/cq.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/hardirq.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include <rdma/ib_verbs.h>
+#include "lib/cq.h"
+#include "mlx5.h"
+
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx5_cq_tasklet_cb(unsigned long data)
+{
+	unsigned long flags;
+	unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+	struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+	struct mlx5_core_cq *mcq;
+	struct mlx5_core_cq *temp;
+
+	spin_lock_irqsave(&ctx->lock, flags);
+	list_splice_tail_init(&ctx->list, &ctx->process_list);
+	spin_unlock_irqrestore(&ctx->lock, flags);
+
+	list_for_each_entry_safe(mcq, temp, &ctx->process_list,
+				 tasklet_ctx.list) {
+		list_del_init(&mcq->tasklet_ctx.list);
+		mcq->tasklet_ctx.comp(mcq);
+		if (atomic_dec_and_test(&mcq->refcount))
+			complete(&mcq->free);
+		if (time_after(jiffies, end))
+			break;
+	}
+
+	if (!list_empty(&ctx->process_list))
+		tasklet_schedule(&ctx->task);
+}
+
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+{
+	unsigned long flags;
+	struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+	spin_lock_irqsave(&tasklet_ctx->lock, flags);
+	/* When CQ migration between EQs is implemented, this point will
+	 * need synchronization: while a CQ is being migrated, completions
+	 * may still arrive on the old EQ.
+	 */
+	if (list_empty_careful(&cq->tasklet_ctx.list)) {
+		atomic_inc(&cq->refcount);
+		list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+	}
+	spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
+{
+	struct mlx5_core_cq *cq;
+	struct mlx5_cq_table *table = &dev->priv.cq_table;
+
+	spin_lock(&table->lock);
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (likely(cq))
+		atomic_inc(&cq->refcount);
+	spin_unlock(&table->lock);
+
+	if (!cq) {
+		mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	++cq->arm_sn;
+
+	cq->comp(cq);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
+{
+	struct mlx5_cq_table *table = &dev->priv.cq_table;
+	struct mlx5_core_cq *cq;
+
+	spin_lock(&table->lock);
+
+	cq = radix_tree_lookup(&table->tree, cqn);
+	if (cq)
+		atomic_inc(&cq->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!cq) {
+		mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
+		return;
+	}
+
+	cq->event(cq, event_type);
+
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+}
+
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			u32 *in, int inlen)
+{
+	struct mlx5_cq_table *table = &dev->priv.cq_table;
+	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+	u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
+	u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
+	int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+			   c_eqn);
+	struct mlx5_eq *eq;
+	int err;
+
+	eq = mlx5_eqn2eq(dev, eqn);
+	if (IS_ERR(eq))
+		return PTR_ERR(eq);
+
+	memset(out, 0, sizeof(out));
+	MLX5_SET(create_cq_in, in, opcode, MLX5_CMD_OP_CREATE_CQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err)
+		return err;
+
+	cq->cqn = MLX5_GET(create_cq_out, out, cqn);
+	cq->cons_index = 0;
+	cq->arm_sn     = 0;
+	atomic_set(&cq->refcount, 1);
+	init_completion(&cq->free);
+	if (!cq->comp)
+		cq->comp = mlx5_add_cq_to_tasklet;
+	/* assuming CQ will be deleted before the EQ */
+	cq->tasklet_ctx.priv = &eq->tasklet_ctx;
+	INIT_LIST_HEAD(&cq->tasklet_ctx.list);
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, cq->cqn, cq);
+	spin_unlock_irq(&table->lock);
+	if (err)
+		goto err_cmd;
+
+	cq->pid = current->pid;
+	err = mlx5_debug_cq_add(dev, cq);
+	if (err)
+		mlx5_core_dbg(dev, "failed adding CQ 0x%x to debug file system\n",
+			      cq->cqn);
+
+	cq->uar = dev->priv.uar;
+
+	return 0;
+
+err_cmd:
+	memset(din, 0, sizeof(din));
+	memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);
+	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_cq);
+
+int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
+{
+	struct mlx5_cq_table *table = &dev->priv.cq_table;
+	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
+	struct mlx5_core_cq *tmp;
+	int err;
+
+	spin_lock_irq(&table->lock);
+	tmp = radix_tree_delete(&table->tree, cq->cqn);
+	spin_unlock_irq(&table->lock);
+	if (!tmp) {
+		mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
+		return -EINVAL;
+	}
+	if (tmp != cq) {
+		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
+		return -EINVAL;
+	}
+
+	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
+	MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	synchronize_irq(cq->irqn);
+
+	mlx5_debug_cq_remove(dev, cq);
+	if (atomic_dec_and_test(&cq->refcount))
+		complete(&cq->free);
+	wait_for_completion(&cq->free);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_cq);
+
+int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+		       u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+
+	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+	MLX5_SET(query_cq_in, in, cqn, cq->cqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_query_cq);
+
+int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_cq_out)] = {0};
+
+	MLX5_SET(modify_cq_in, in, opcode, MLX5_CMD_OP_MODIFY_CQ);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_modify_cq);
+
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+				   struct mlx5_core_cq *cq,
+				   u16 cq_period,
+				   u16 cq_max_count)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {0};
+	void *cqc;
+
+	MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+	MLX5_SET(cqc, cqc, cq_period, cq_period);
+	MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+	MLX5_SET(modify_cq_in, in,
+		 modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+		 MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT);
+
+	return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
+
+int mlx5_init_cq_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cq_table *table = &dev->priv.cq_table;
+	int err;
+
+	memset(table, 0, sizeof(*table));
+	spin_lock_init(&table->lock);
+	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+	err = mlx5_cq_debugfs_init(dev);
+
+	return err;
+}
+
+void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
+{
+	mlx5_cq_debugfs_cleanup(dev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/cq.h b/drivers/net/ethernet/mellanox/mlx5/lib/cq.h
new file mode 100644
index 0000000..380875a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/cq.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_CORE_CQ_H
+#define MLX5_CORE_CQ_H
+
+#include <rdma/ib_verbs.h>
+#include "driver.h"
+
+struct mlx5_core_cq {
+	u32			cqn;
+	int			cqe_sz;
+	__be32		       *set_ci_db;
+	__be32		       *arm_db;
+	struct mlx5_uars_page  *uar;
+	atomic_t		refcount;
+	struct completion	free;
+	unsigned		vector;
+	unsigned int		irqn;
+	void (*comp)		(struct mlx5_core_cq *);
+	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
+	u32			cons_index;
+	unsigned		arm_sn;
+	struct mlx5_rsc_debug	*dbg;
+	int			pid;
+	struct {
+		struct list_head list;
+		void (*comp)(struct mlx5_core_cq *);
+		void		*priv;
+	} tasklet_ctx;
+	int			reset_notify_added;
+	struct list_head	reset_notify;
+};
+
+enum {
+	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
+	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
+	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
+	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
+	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
+	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
+	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
+	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
+	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
+	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
+	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
+	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
+	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
+};
+
+enum {
+	MLX5_CQE_OWNER_MASK	= 1,
+	MLX5_CQE_REQ		= 0,
+	MLX5_CQE_RESP_WR_IMM	= 1,
+	MLX5_CQE_RESP_SEND	= 2,
+	MLX5_CQE_RESP_SEND_IMM	= 3,
+	MLX5_CQE_RESP_SEND_INV	= 4,
+	MLX5_CQE_RESIZE_CQ	= 5,
+	MLX5_CQE_SIG_ERR	= 12,
+	MLX5_CQE_REQ_ERR	= 13,
+	MLX5_CQE_RESP_ERR	= 14,
+	MLX5_CQE_INVALID	= 15,
+};
+
+enum {
+	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
+	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
+	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
+};
+
+enum {
+	MLX5_CQ_OPMOD_RESIZE		= 1,
+	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
+	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
+	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
+};
+
+struct mlx5_cq_modify_params {
+	int	type;
+	union {
+		struct {
+			u32	page_offset;
+			u8	log_cq_size;
+		} resize;
+
+		struct {
+		} moder;
+
+		struct {
+		} mapping;
+	} params;
+};
+
+enum {
+	CQE_SIZE_64 = 0,
+	CQE_SIZE_128 = 1,
+};
+
+static inline int cqe_sz_to_mlx_sz(u8 size)
+{
+	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+}
+
+static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
+{
+	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
+}
+
+enum {
+	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
+	MLX5_CQ_DB_REQ_NOT		= 0 << 24
+};
+
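+/* The arm doorbell word packs the low two bits of the arm sequence
+ * number into bits [29:28], the request-notification command into
+ * bit 24 and the consumer index into bits [23:0], matching the code
+ * below.
+ */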
+static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
+			       void __iomem *uar_page,
+			       u32 cons_index)
+{
+	__be32 doorbell[2];
+	u32 sn;
+	u32 ci;
+
+	sn = cq->arm_sn & 3;
+	ci = cons_index & 0xffffff;
+
+	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
+
+	/* Make sure that the doorbell record in host memory is
+	 * written before ringing the doorbell via PCI MMIO.
+	 */
+	wmb();
+
+	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
+	doorbell[1] = cpu_to_be32(cq->cqn);
+
+	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
+}
+
+int mlx5_init_cq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			u32 *in, int inlen);
+int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+		       u32 *out, int outlen);
+int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+			u32 *in, int inlen);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+				   struct mlx5_core_cq *cq, u16 cq_period,
+				   u16 cq_max_count);
+int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
+
+#endif /* MLX5_CORE_CQ_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/device.h b/drivers/net/ethernet/mellanox/mlx5/lib/device.h
new file mode 100644
index 0000000..dcbc054
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/device.h
@@ -0,0 +1,1100 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DEVICE_H
+#define MLX5_DEVICE_H
+
+#include <linux/types.h>
+#include <rdma/ib_verbs.h>
+#include "mlx5_ifc.h"
+
+#if defined(__LITTLE_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS	0
+#elif defined(__BIG_ENDIAN)
+#define MLX5_SET_HOST_ENDIANNESS	0x80
+#else
+#error Host endianness not defined
+#endif
+
+/* helper macros */
+#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
+#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
+#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
+#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
+#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
+#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
+#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
+#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
+
+#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
+#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
+#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
+#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
+#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
+#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
+
+/* insert a value to a struct */
+#define MLX5_SET(typ, p, fld, v) do { \
+	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
+	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
+		     << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_SET_TO_ONES(typ, p, fld) do { \
+	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
+	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
+	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
+		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
+		     << __mlx5_dw_bit_off(typ, fld))); \
+} while (0)
+
+#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
+__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
+__mlx5_mask(typ, fld))
+
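+/* Illustrative use of MLX5_SET()/MLX5_GET() (mirroring callers elsewhere
+ * in this patch):
+ *
+ *	u32 in[MLX5_ST_SZ_DW(query_cq_in)] = {0};
+ *
+ *	MLX5_SET(query_cq_in, in, opcode, MLX5_CMD_OP_QUERY_CQ);
+ *	pr_debug("opcode %u\n", MLX5_GET(query_cq_in, in, opcode));
+ */
+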
+#define MLX5_GET_PR(typ, p, fld) ({ \
+	u32 ___t = MLX5_GET(typ, p, fld); \
+	pr_debug(#fld " = 0x%x\n", ___t); \
+	___t; \
+})
+
+#define __MLX5_SET64(typ, p, fld, v) do { \
+	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
+	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
+} while (0)
+
+#define MLX5_SET64(typ, p, fld, v) do { \
+	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+	__MLX5_SET64(typ, p, fld, v); \
+} while (0)
+
+#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
+	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+	__MLX5_SET64(typ, p, fld[idx], v); \
+} while (0)
+
+#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
+
+#define MLX5_GET64_PR(typ, p, fld) ({ \
+	u64 ___t = MLX5_GET64(typ, p, fld); \
+	pr_debug(#fld " = 0x%llx\n", ___t); \
+	___t; \
+})
+
+/* Big endian getters */
+#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
+	__mlx5_64_off(typ, fld)))
+
+#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
+		type_t tmp;						  \
+		switch (sizeof(tmp)) {					  \
+		case sizeof(u8):					  \
+			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
+			break;						  \
+		case sizeof(u16):					  \
+			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
+			break;						  \
+		case sizeof(u32):					  \
+			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
+			break;						  \
+		case sizeof(u64):					  \
+			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
+			break;						  \
+			}						  \
+		tmp;							  \
+		})
+
+enum mlx5_inline_modes {
+	MLX5_INLINE_MODE_NONE,
+	MLX5_INLINE_MODE_L2,
+	MLX5_INLINE_MODE_IP,
+	MLX5_INLINE_MODE_TCP_UDP,
+};
+
+enum {
+	MLX5_MAX_COMMANDS		= 32,
+	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
+	MLX5_PCI_CMD_XPORT		= 7,
+	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
+	MLX5_MAX_PSVS			= 4,
+};
+
+enum {
+	MLX5_EXTENDED_UD_AV		= 0x80000000,
+};
+
+enum {
+	MLX5_CQ_STATE_ARMED		= 9,
+	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
+	MLX5_CQ_STATE_FIRED		= 0xa,
+};
+
+enum {
+	MLX5_STAT_RATE_OFFSET	= 5,
+};
+
+enum {
+	MLX5_INLINE_SEG = 0x80000000,
+};
+
+enum {
+	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
+};
+
+enum {
+	MLX5_MIN_PKEY_TABLE_SIZE = 128,
+	MLX5_MAX_LOG_PKEY_TABLE  = 5,
+};
+
+enum {
+	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
+};
+
+enum {
+	MLX5_PFAULT_SUBTYPE_WQE = 0,
+	MLX5_PFAULT_SUBTYPE_RDMA = 1,
+};
+
+enum {
+	MLX5_PERM_LOCAL_READ	= 1 << 2,
+	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
+	MLX5_PERM_REMOTE_READ	= 1 << 4,
+	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
+	MLX5_PERM_ATOMIC	= 1 << 6,
+	MLX5_PERM_UMR_EN	= 1 << 7,
+};
+
+enum {
+	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
+	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
+	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
+	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
+	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
+};
+
+enum {
+	MLX5_EN_RD	= (u64)1,
+	MLX5_EN_WR	= (u64)2
+};
+
+enum {
+	MLX5_ADAPTER_PAGE_SHIFT		= 12,
+	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
+};
+
+enum {
+	MLX5_BFREGS_PER_UAR		= 4,
+	MLX5_MAX_UARS			= 1 << 8,
+	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
+	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
+					  MLX5_NON_FP_BFREGS_PER_UAR,
+	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
+					  MLX5_NON_FP_BFREGS_PER_UAR,
+	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
+};
+
+enum {
+	MLX5_MKEY_MASK_LEN		= 1ull << 0,
+	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
+	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
+	MLX5_MKEY_MASK_PD		= 1ull << 7,
+	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
+	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
+	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
+	MLX5_MKEY_MASK_KEY		= 1ull << 13,
+	MLX5_MKEY_MASK_QPN		= 1ull << 14,
+	MLX5_MKEY_MASK_LR		= 1ull << 17,
+	MLX5_MKEY_MASK_LW		= 1ull << 18,
+	MLX5_MKEY_MASK_RR		= 1ull << 19,
+	MLX5_MKEY_MASK_RW		= 1ull << 20,
+	MLX5_MKEY_MASK_A		= 1ull << 21,
+	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
+	MLX5_MKEY_MASK_FREE		= 1ull << 29,
+};
+
+enum {
+	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
+
+	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
+	MLX5_UMR_CHECK_FREE		= (2 << 5),
+
+	MLX5_UMR_INLINE			= (1 << 7),
+};
+
+#define MLX5_UMR_MTT_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
+#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+
+#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
+
+enum {
+	MLX5_EVENT_QUEUE_TYPE_QP = 0,
+	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
+enum mlx5_event {
+	MLX5_EVENT_TYPE_COMP		   = 0x0,
+
+	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
+	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
+	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
+	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
+	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,
+
+	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
+	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
+	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
+	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
+	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
+	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
+
+	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
+	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
+	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
+	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
+	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
+
+	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
+	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,
+
+	MLX5_EVENT_TYPE_CMD		   = 0x0a,
+	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
+
+	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
+	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
+};
+
+enum {
+	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
+	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
+	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
+	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
+	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
+	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
+	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
+};
+
+enum {
+	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
+	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
+	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
+	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
+	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
+	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
+	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
+	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
+	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
+	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
+	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
+	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
+};
+
+enum {
+	MLX5_ROCE_VERSION_1		= 0,
+	MLX5_ROCE_VERSION_2		= 2,
+};
+
+enum {
+	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
+	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
+};
+
+enum {
+	MLX5_ROCE_L3_TYPE_IPV4		= 0,
+	MLX5_ROCE_L3_TYPE_IPV6		= 1,
+};
+
+enum {
+	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
+	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
+};
+
+enum {
+	MLX5_OPCODE_NOP			= 0x00,
+	MLX5_OPCODE_SEND_INVAL		= 0x01,
+	MLX5_OPCODE_RDMA_WRITE		= 0x08,
+	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
+	MLX5_OPCODE_SEND		= 0x0a,
+	MLX5_OPCODE_SEND_IMM		= 0x0b,
+	MLX5_OPCODE_LSO			= 0x0e,
+	MLX5_OPCODE_RDMA_READ		= 0x10,
+	MLX5_OPCODE_ATOMIC_CS		= 0x11,
+	MLX5_OPCODE_ATOMIC_FA		= 0x12,
+	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
+	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
+	MLX5_OPCODE_BIND_MW		= 0x18,
+	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
+
+	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
+	MLX5_RECV_OPCODE_SEND		= 0x01,
+	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
+	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
+
+	MLX5_CQE_OPCODE_ERROR		= 0x1e,
+	MLX5_CQE_OPCODE_RESIZE		= 0x16,
+
+	MLX5_OPCODE_SET_PSV		= 0x20,
+	MLX5_OPCODE_GET_PSV		= 0x21,
+	MLX5_OPCODE_CHECK_PSV		= 0x22,
+	MLX5_OPCODE_RGET_PSV		= 0x26,
+	MLX5_OPCODE_RCHECK_PSV		= 0x27,
+
+	MLX5_OPCODE_UMR			= 0x25,
+};
+
+enum {
+	MLX5_SET_PORT_RESET_QKEY	= 0,
+	MLX5_SET_PORT_GUID0		= 16,
+	MLX5_SET_PORT_NODE_GUID		= 17,
+	MLX5_SET_PORT_SYS_GUID		= 18,
+	MLX5_SET_PORT_GID_TABLE		= 19,
+	MLX5_SET_PORT_PKEY_TABLE	= 20,
+};
+
+enum {
+	MLX5_BW_NO_LIMIT   = 0,
+	MLX5_100_MBPS_UNIT = 3,
+	MLX5_GBPS_UNIT	   = 4,
+};
+
+enum {
+	MLX5_MAX_PAGE_SHIFT		= 31
+};
+
+enum {
+	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
+};
+
+enum {
+	/*
+	 * Max wqe size for rdma read is 512 bytes, so this
+	 * limits our max_sge_rd as the wqe needs to fit:
+	 * - ctrl segment (16 bytes)
+	 * - rdma segment (16 bytes)
+	 * - scatter elements (16 bytes each)
+	 */
+	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
+};
+
+enum mlx5_odp_transport_cap_bits {
+	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
+	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
+	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
+	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
+};
+
+struct mlx5_odp_caps {
+	char reserved[0x10];
+	struct {
+		__be32			rc_odp_caps;
+		__be32			uc_odp_caps;
+		__be32			ud_odp_caps;
+	} per_transport_caps;
+	char reserved2[0xe4];
+};
+
+struct mlx5_cmd_layout {
+	u8		type;
+	u8		rsvd0[3];
+	__be32		inlen;
+	__be64		in_ptr;
+	__be32		in[4];
+	__be32		out[4];
+	__be64		out_ptr;
+	__be32		outlen;
+	u8		token;
+	u8		sig;
+	u8		rsvd1;
+	u8		status_own;
+};
+
+struct health_buffer {
+	__be32		assert_var[5];
+	__be32		rsvd0[3];
+	__be32		assert_exit_ptr;
+	__be32		assert_callra;
+	__be32		rsvd1[2];
+	__be32		fw_ver;
+	__be32		hw_id;
+	__be32		rsvd2;
+	u8		irisc_index;
+	u8		synd;
+	__be16		ext_synd;
+};
+
+struct mlx5_init_seg {
+	__be32			fw_rev;
+	__be32			cmdif_rev_fw_sub;
+	__be32			rsvd0[2];
+	__be32			cmdq_addr_h;
+	__be32			cmdq_addr_l_sz;
+	__be32			cmd_dbell;
+	__be32			rsvd1[120];
+	__be32			initializing;
+	struct health_buffer	health;
+	__be32			rsvd2[880];
+	__be32			internal_timer_h;
+	__be32			internal_timer_l;
+	__be32			rsvd3[2];
+	__be32			health_counter;
+	__be32			rsvd4[1019];
+	__be64			ieee1588_clk;
+	__be32			ieee1588_clk_type;
+	__be32			clr_intx;
+};
+
+struct mlx5_eqe_comp {
+	__be32	reserved[6];
+	__be32	cqn;
+};
+
+struct mlx5_eqe_qp_srq {
+	__be32	reserved1[5];
+	u8	type;
+	u8	reserved2[3];
+	__be32	qp_srq_n;
+};
+
+struct mlx5_eqe_cq_err {
+	__be32	cqn;
+	u8	reserved1[7];
+	u8	syndrome;
+};
+
+struct mlx5_eqe_port_state {
+	u8	reserved0[8];
+	u8	port;
+};
+
+struct mlx5_eqe_gpio {
+	__be32	reserved0[2];
+	__be64	gpio_event;
+};
+
+struct mlx5_eqe_congestion {
+	u8	type;
+	u8	rsvd0;
+	u8	congestion_level;
+};
+
+struct mlx5_eqe_stall_vl {
+	u8	rsvd0[3];
+	u8	port_vl;
+};
+
+struct mlx5_eqe_cmd {
+	__be32	vector;
+	__be32	rsvd[6];
+};
+
+struct mlx5_eqe_page_req {
+	u8		rsvd0[2];
+	__be16		func_id;
+	__be32		num_pages;
+	__be32		rsvd1[5];
+};
+
+struct mlx5_eqe_page_fault {
+	__be32 bytes_committed;
+	union {
+		struct {
+			u16     reserved1;
+			__be16  wqe_index;
+			u16	reserved2;
+			__be16  packet_length;
+			__be32  token;
+			u8	reserved4[8];
+			__be32  pftype_wq;
+		} __packed wqe;
+		struct {
+			__be32  r_key;
+			u16	reserved1;
+			__be16  packet_length;
+			__be32  rdma_op_len;
+			__be64  rdma_va;
+			__be32  pftype_token;
+		} __packed rdma;
+	} __packed;
+} __packed;
+
+struct mlx5_eqe_vport_change {
+	u8		rsvd0[2];
+	__be16		vport_num;
+	__be32		rsvd1[6];
+} __packed;
+
+struct mlx5_eqe_port_module {
+	u8        reserved_at_0[1];
+	u8        module;
+	u8        reserved_at_2[1];
+	u8        module_status;
+	u8        reserved_at_4[2];
+	u8        error_type;
+} __packed;
+
+union ev_data {
+	__be32				raw[7];
+	struct mlx5_eqe_cmd		cmd;
+	struct mlx5_eqe_comp		comp;
+	struct mlx5_eqe_qp_srq		qp_srq;
+	struct mlx5_eqe_cq_err		cq_err;
+	struct mlx5_eqe_port_state	port;
+	struct mlx5_eqe_gpio		gpio;
+	struct mlx5_eqe_congestion	cong;
+	struct mlx5_eqe_stall_vl	stall_vl;
+	struct mlx5_eqe_page_req	req_pages;
+	struct mlx5_eqe_page_fault	page_fault;
+	struct mlx5_eqe_vport_change	vport_change;
+	struct mlx5_eqe_port_module	port_module;
+} __packed;
+
+struct mlx5_eqe {
+	u8		rsvd0;
+	u8		type;
+	u8		rsvd1;
+	u8		sub_type;
+	__be32		rsvd2[7];
+	union ev_data	data;
+	__be16		rsvd3;
+	u8		signature;
+	u8		owner;
+} __packed;
+
+struct mlx5_cmd_prot_block {
+	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
+	u8		rsvd0[48];
+	__be64		next;
+	__be32		block_num;
+	u8		rsvd1;
+	u8		token;
+	u8		ctrl_sig;
+	u8		sig;
+};
+
+enum {
+	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
+};
+
+struct mlx5_err_cqe {
+	u8	rsvd0[32];
+	__be32	srqn;
+	u8	rsvd1[18];
+	u8	vendor_err_synd;
+	u8	syndrome;
+	__be32	s_wqe_opcode_qpn;
+	__be16	wqe_counter;
+	u8	signature;
+	u8	op_own;
+};
+
+struct mlx5_cqe64 {
+	u8		outer_l3_tunneled;
+	u8		rsvd0;
+	__be16		wqe_id;
+	u8		lro_tcppsh_abort_dupack;
+	u8		lro_min_ttl;
+	__be16		lro_tcp_win;
+	__be32		lro_ack_seq_num;
+	__be32		rss_hash_result;
+	u8		rss_hash_type;
+	u8		ml_path;
+	u8		rsvd20[2];
+	__be16		check_sum;
+	__be16		slid;
+	__be32		flags_rqpn;
+	u8		hds_ip_ext;
+	u8		l4_l3_hdr_type;
+	__be16		vlan_info;
+	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
+	__be32		imm_inval_pkey;
+	u8		rsvd40[4];
+	__be32		byte_cnt;
+	__be32		timestamp_h;
+	__be32		timestamp_l;
+	__be32		sop_drop_qpn;
+	__be16		wqe_counter;
+	u8		signature;
+	u8		op_own;
+};
+
+struct mlx5_mini_cqe8 {
+	union {
+		__be32 rx_hash_result;
+		struct {
+			__be16 checksum;
+			__be16 rsvd;
+		};
+		struct {
+			__be16 wqe_counter;
+			u8  s_wqe_opcode;
+			u8  reserved;
+		} s_wqe_info;
+	};
+	__be32 byte_cnt;
+};
+
+enum {
+	MLX5_NO_INLINE_DATA,
+	MLX5_INLINE_DATA32_SEG,
+	MLX5_INLINE_DATA64_SEG,
+	MLX5_COMPRESSED,
+};
+
+enum {
+	MLX5_CQE_FORMAT_CSUM = 0x1,
+};
+
+#define MLX5_MINI_CQE_ARRAY_SIZE 8
+
+static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->op_own >> 2) & 0x3;
+}
+
+static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
+}
+
+static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
+}
+
+static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
+{
+	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
+}
+
+static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+{
+	return cqe->outer_l3_tunneled & 0x1;
+}
+
+static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+{
+	return !!(cqe->l4_l3_hdr_type & 0x1);
+}
+
+static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
+{
+	u32 hi, lo;
+
+	hi = be32_to_cpu(cqe->timestamp_h);
+	lo = be32_to_cpu(cqe->timestamp_l);
+
+	return (u64)lo | ((u64)hi << 32);
+}
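+
+/* Usage sketch (editor's note, not part of the patch): how an RX path might
+ * combine the CQE helpers above; "csum", "vlan_tci" and "ts" are hypothetical
+ * locals in the caller:
+ *
+ *	if (mlx5_get_cqe_format(cqe) == MLX5_CQE_FORMAT_CSUM)
+ *		csum = be16_to_cpu(cqe->check_sum);
+ *	if (cqe_has_vlan(cqe))
+ *		vlan_tci = be16_to_cpu(cqe->vlan_info);
+ *	ts = get_cqe_ts(cqe);
+ */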
+
+struct mpwrq_cqe_bc {
+	__be16	filler_consumed_strides;
+	__be16	byte_cnt;
+};
+
+static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return be16_to_cpu(bc->byte_cnt);
+}
+
+static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
+{
+	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return mpwrq_get_cqe_bc_consumed_strides(bc);
+}
+
+static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
+{
+	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
+
+	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
+}
+
+static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
+{
+	return be16_to_cpu(cqe->wqe_counter);
+}
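+
+/* Usage sketch (editor's note, not part of the patch): a multi-packet RQ
+ * completion handler would typically combine these accessors; "wq" and
+ * "consume()" are hypothetical:
+ *
+ *	if (!mpwrq_is_filler_cqe(cqe))
+ *		consume(wq, mpwrq_get_cqe_stride_index(cqe),
+ *			mpwrq_get_cqe_consumed_strides(cqe),
+ *			mpwrq_get_cqe_byte_cnt(cqe));
+ */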
+
+enum {
+	CQE_L4_HDR_TYPE_NONE			= 0x0,
+	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
+	CQE_L4_HDR_TYPE_UDP			= 0x2,
+	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
+	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
+};
+
+enum {
+	CQE_RSS_HTYPE_IP	= 0x3 << 6,
+	CQE_RSS_HTYPE_L4	= 0x3 << 2,
+};
+
+enum {
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
+	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
+};
+
+enum {
+	CQE_L2_OK	= 1 << 0,
+	CQE_L3_OK	= 1 << 1,
+	CQE_L4_OK	= 1 << 2,
+};
+
+struct mlx5_sig_err_cqe {
+	u8		rsvd0[16];
+	__be32		expected_trans_sig;
+	__be32		actual_trans_sig;
+	__be32		expected_reftag;
+	__be32		actual_reftag;
+	__be16		syndrome;
+	u8		rsvd22[2];
+	__be32		mkey;
+	__be64		err_offset;
+	u8		rsvd30[8];
+	__be32		qpn;
+	u8		rsvd38[2];
+	u8		signature;
+	u8		op_own;
+};
+
+struct mlx5_wqe_srq_next_seg {
+	u8			rsvd0[2];
+	__be16			next_wqe_index;
+	u8			signature;
+	u8			rsvd1[11];
+};
+
+union mlx5_ext_cqe {
+	struct ib_grh	grh;
+	u8		inl[64];
+};
+
+struct mlx5_cqe128 {
+	union mlx5_ext_cqe	inl_grh;
+	struct mlx5_cqe64	cqe64;
+};
+
+enum {
+	MLX5_MKEY_STATUS_FREE = 1 << 6,
+};
+
+enum {
+	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
+	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
+	MLX5_MKEY_BSF_EN	= 1 << 30,
+	MLX5_MKEY_LEN64		= 1 << 31,
+};
+
+struct mlx5_mkey_seg {
+	/* This is a two-bit field occupying bits 31-30.
+	 * Bit 31 is always 0.
+	 * Bit 30 is zero for regular MRs and 1 (i.e. free) for UMRs that
+	 * do not have translation.
+	 */
+	u8		status;
+	u8		pcie_control;
+	u8		flags;
+	u8		version;
+	__be32		qpn_mkey7_0;
+	u8		rsvd1[4];
+	__be32		flags_pd;
+	__be64		start_addr;
+	__be64		len;
+	__be32		bsfs_octo_size;
+	u8		rsvd2[16];
+	__be32		xlt_oct_size;
+	u8		rsvd3[3];
+	u8		log2_page_size;
+	u8		rsvd4[4];
+};
+
+#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
+
+enum {
+	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
+};
+
+enum {
+	VPORT_STATE_DOWN		= 0x0,
+	VPORT_STATE_UP			= 0x1,
+};
+
+enum {
+	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
+	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
+	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
+};
+
+enum {
+	MLX5_L3_PROT_TYPE_IPV4		= 0,
+	MLX5_L3_PROT_TYPE_IPV6		= 1,
+};
+
+enum {
+	MLX5_L4_PROT_TYPE_TCP		= 0,
+	MLX5_L4_PROT_TYPE_UDP		= 1,
+};
+
+enum {
+	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
+	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
+	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
+	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
+	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
+};
+
+enum {
+	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
+	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
+	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
+};
+
+enum {
+	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
+	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
+};
+
+enum {
+	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
+	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
+};
+
+enum mlx5_list_type {
+	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
+	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
+	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
+};
+
+enum {
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
+	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
+};
+
+enum mlx5_wol_mode {
+	MLX5_WOL_DISABLE        = 0,
+	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
+	MLX5_WOL_MAGIC          = 1 << 2,
+	MLX5_WOL_ARP            = 1 << 3,
+	MLX5_WOL_BROADCAST      = 1 << 4,
+	MLX5_WOL_MULTICAST      = 1 << 5,
+	MLX5_WOL_UNICAST        = 1 << 6,
+	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
+};
+
+/* MLX5 DEV CAPs */
+
+/* TODO: EAT.ME */
+enum mlx5_cap_mode {
+	HCA_CAP_OPMOD_GET_MAX	= 0,
+	HCA_CAP_OPMOD_GET_CUR	= 1,
+};
+
+enum mlx5_cap_type {
+	MLX5_CAP_GENERAL = 0,
+	MLX5_CAP_ETHERNET_OFFLOADS,
+	MLX5_CAP_ODP,
+	MLX5_CAP_ATOMIC,
+	MLX5_CAP_ROCE,
+	MLX5_CAP_IPOIB_OFFLOADS,
+	MLX5_CAP_EOIB_OFFLOADS,
+	MLX5_CAP_FLOW_TABLE,
+	MLX5_CAP_ESWITCH_FLOW_TABLE,
+	MLX5_CAP_ESWITCH,
+	MLX5_CAP_RESERVED,
+	MLX5_CAP_VECTOR_CALC,
+	MLX5_CAP_QOS,
+	/* NUM OF CAP Types */
+	MLX5_CAP_NUM
+};
+
+/* GET Dev Caps macros */
+#define MLX5_CAP_GEN(mdev, cap) \
+	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_GEN_MAX(mdev, cap) \
+	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+
+#define MLX5_CAP_ETH(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ETH_MAX(mdev, cap) \
+	MLX5_GET(per_protocol_networking_offload_caps,\
+		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+
+#define MLX5_CAP_ROCE(mdev, cap) \
+	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ROCE_MAX(mdev, cap) \
+	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+
+#define MLX5_CAP_ATOMIC(mdev, cap) \
+	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
+	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+
+#define MLX5_CAP_FLOWTABLE(mdev, cap) \
+	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
+	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
+	MLX5_GET(flow_table_eswitch_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
+	MLX5_GET(flow_table_eswitch_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
+	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+
+#define MLX5_CAP_ESW(mdev, cap) \
+	MLX5_GET(e_switch_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ESW_MAX(mdev, cap) \
+	MLX5_GET(e_switch_cap, \
+		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+
+#define MLX5_CAP_ODP(mdev, cap)\
+	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+
+#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
+	MLX5_GET(vector_calc_cap, \
+		 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+
+#define MLX5_CAP_QOS(mdev, cap)\
+	MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+
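+/* Usage sketch (editor's note, not part of the patch): "cap" in the macros
+ * above is a field name from the matching mlx5_ifc capability struct, so a
+ * caller gates features roughly like this ("setup_offloads" is hypothetical):
+ *
+ *	if (MLX5_CAP_GEN(mdev, vport_group_manager) &&
+ *	    MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ft_support))
+ *		setup_offloads(mdev);
+ */
+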
+enum {
+	MLX5_CMD_STAT_OK			= 0x0,
+	MLX5_CMD_STAT_INT_ERR			= 0x1,
+	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
+	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
+	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
+	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
+	MLX5_CMD_STAT_RES_BUSY			= 0x6,
+	MLX5_CMD_STAT_LIM_ERR			= 0x8,
+	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
+	MLX5_CMD_STAT_IX_ERR			= 0xa,
+	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
+	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
+	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
+	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
+	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
+	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
+};
+
+enum {
+	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
+	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
+	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
+	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
+	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
+	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
+	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
+	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
+	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
+};
+
+enum {
+	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
+	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
+};
+
+static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+{
+	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
+		return 0;
+	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+}
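+
+/* Example (editor's note, not part of the patch): the device reports the
+ * pkey table size as a log-style code, so pkey_sz == 2 yields
+ * MLX5_MIN_PKEY_TABLE_SIZE << 2 entries, while codes above
+ * MLX5_MAX_LOG_PKEY_TABLE collapse to 0 so callers can detect an invalid
+ * value.
+ */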
+
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
+#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
+				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
+				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
+
+#endif /* MLX5_DEVICE_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/doorbell.h b/drivers/net/ethernet/mellanox/mlx5/lib/doorbell.h
new file mode 100644
index 0000000..0787de2
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/doorbell.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DOORBELL_H
+#define MLX5_DOORBELL_H
+
+#define MLX5_BF_OFFSET	      0x800
+#define MLX5_CQ_DOORBELL      0x20
+
+#if BITS_PER_LONG == 64
+/* Assume that we can just write a 64-bit doorbell atomically.  s390
+ * actually doesn't have writeq() but S/390 systems don't even have
+ * PCI so we won't worry about it.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name)
+#define MLX5_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
+#define MLX5_GET_DOORBELL_LOCK(ptr)      (NULL)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+				spinlock_t *doorbell_lock)
+{
+	__raw_writeq(*(u64 *)val, dest);
+}
+
+#else
+
+/* Just fall back to a spinlock to protect the doorbell if
+ * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
+ * MMIO writes.
+ */
+
+#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
+#define MLX5_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
+#define MLX5_GET_DOORBELL_LOCK(ptr)      (ptr)
+
+static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
+				spinlock_t *doorbell_lock)
+{
+	unsigned long flags;
+
+	if (doorbell_lock)
+		spin_lock_irqsave(doorbell_lock, flags);
+	__raw_writel((__force u32) val[0], dest);
+	__raw_writel((__force u32) val[1], dest + 4);
+	if (doorbell_lock)
+		spin_unlock_irqrestore(doorbell_lock, flags);
+}
+
+#endif
+
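+/* Usage sketch (editor's note, not part of the patch): callers pass the
+ * doorbell value pre-formatted as two big-endian words, typically the first
+ * 8 bytes of the WQE control segment; "ctrl", "sq" and "db_lock" are
+ * hypothetical:
+ *
+ *	mlx5_write64((__be32 *)ctrl, sq->uar_map + MLX5_BF_OFFSET,
+ *		     MLX5_GET_DOORBELL_LOCK(&sq->db_lock));
+ */
+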
+#endif /* MLX5_DOORBELL_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/driver.h b/drivers/net/ethernet/mellanox/mlx5/lib/driver.h
new file mode 100644
index 0000000..632bd3e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/driver.h
@@ -0,0 +1,1127 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_DRIVER_H
+#define MLX5_DRIVER_H
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/pci.h>
+#include <linux/spinlock_types.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/radix-tree.h>
+#include <linux/workqueue.h>
+#include <linux/mempool.h>
+#include <linux/interrupt.h>
+
+#include "lib/device.h"
+#include "lib/doorbell.h"
+#include "lib/srq.h"
+
+enum {
+	MLX5_BOARD_ID_LEN = 64,
+	MLX5_MAX_NAME_LEN = 16,
+};
+
+enum {
+	/* One minute for the sake of bringup. Generally, commands must always
+	 * complete, and we may need to increase this timeout value.
+	 */
+	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
+	MLX5_CMD_WQ_MAX_NAME	= 32,
+};
+
+enum {
+	CMD_OWNER_SW		= 0x0,
+	CMD_OWNER_HW		= 0x1,
+	CMD_STATUS_SUCCESS	= 0,
+};
+
+enum mlx5_sqp_t {
+	MLX5_SQP_SMI		= 0,
+	MLX5_SQP_GSI		= 1,
+	MLX5_SQP_IEEE_1588	= 2,
+	MLX5_SQP_SNIFFER	= 3,
+	MLX5_SQP_SYNC_UMR	= 4,
+};
+
+enum {
+	MLX5_MAX_PORTS	= 2,
+};
+
+enum {
+	MLX5_EQ_VEC_PAGES	 = 0,
+	MLX5_EQ_VEC_CMD		 = 1,
+	MLX5_EQ_VEC_ASYNC	 = 2,
+	MLX5_EQ_VEC_PFAULT	 = 3,
+	MLX5_EQ_VEC_COMP_BASE,
+};
+
+enum {
+	MLX5_MAX_IRQ_NAME	= 32
+};
+
+enum {
+	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
+	MLX5_ATOMIC_MODE_CX		= 2 << 16,
+	MLX5_ATOMIC_MODE_8B		= 3 << 16,
+	MLX5_ATOMIC_MODE_16B		= 4 << 16,
+	MLX5_ATOMIC_MODE_32B		= 5 << 16,
+	MLX5_ATOMIC_MODE_64B		= 6 << 16,
+	MLX5_ATOMIC_MODE_128B		= 7 << 16,
+	MLX5_ATOMIC_MODE_256B		= 8 << 16,
+};
+
+enum {
+	MLX5_REG_QETCR		 = 0x4005,
+	MLX5_REG_QTCT		 = 0x400a,
+	MLX5_REG_DCBX_PARAM      = 0x4020,
+	MLX5_REG_DCBX_APP        = 0x4021,
+	MLX5_REG_PCAP		 = 0x5001,
+	MLX5_REG_PMTU		 = 0x5003,
+	MLX5_REG_PTYS		 = 0x5004,
+	MLX5_REG_PAOS		 = 0x5006,
+	MLX5_REG_PFCC            = 0x5007,
+	MLX5_REG_PPCNT		 = 0x5008,
+	MLX5_REG_PMAOS		 = 0x5012,
+	MLX5_REG_PUDE		 = 0x5009,
+	MLX5_REG_PMPE		 = 0x5010,
+	MLX5_REG_PELC		 = 0x500e,
+	MLX5_REG_PVLC		 = 0x500f,
+	MLX5_REG_PCMR		 = 0x5041,
+	MLX5_REG_PMLP		 = 0x5002,
+	MLX5_REG_NODE_DESC	 = 0x6001,
+	MLX5_REG_HOST_ENDIANNESS = 0x7004,
+	MLX5_REG_MCIA		 = 0x9014,
+	MLX5_REG_MLCR		 = 0x902b,
+	MLX5_REG_MPCNT		 = 0x9051,
+};
+
+enum mlx5_dcbx_oper_mode {
+	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
+	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
+};
+
+enum {
+	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
+	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
+};
+
+enum mlx5_page_fault_resume_flags {
+	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
+	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
+	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
+	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
+};
+
+enum dbg_rsc_type {
+	MLX5_DBG_RSC_QP,
+	MLX5_DBG_RSC_EQ,
+	MLX5_DBG_RSC_CQ,
+};
+
+struct mlx5_field_desc {
+	struct dentry	       *dent;
+	int			i;
+};
+
+struct mlx5_rsc_debug {
+	struct mlx5_core_dev   *dev;
+	void		       *object;
+	enum dbg_rsc_type	type;
+	struct dentry	       *root;
+	struct mlx5_field_desc	fields[0];
+};
+
+enum mlx5_dev_event {
+	MLX5_DEV_EVENT_SYS_ERROR,
+	MLX5_DEV_EVENT_PORT_UP,
+	MLX5_DEV_EVENT_PORT_DOWN,
+	MLX5_DEV_EVENT_PORT_INITIALIZED,
+	MLX5_DEV_EVENT_LID_CHANGE,
+	MLX5_DEV_EVENT_PKEY_CHANGE,
+	MLX5_DEV_EVENT_GUID_CHANGE,
+	MLX5_DEV_EVENT_CLIENT_REREG,
+};
+
+enum mlx5_port_status {
+	MLX5_PORT_UP        = 1,
+	MLX5_PORT_DOWN      = 2,
+};
+
+enum mlx5_eq_type {
+	MLX5_EQ_TYPE_COMP,
+	MLX5_EQ_TYPE_ASYNC,
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	MLX5_EQ_TYPE_PF,
+#endif
+};
+
+struct mlx5_bfreg_info {
+	u32		       *sys_pages;
+	int			num_low_latency_bfregs;
+	unsigned int	       *count;
+
+	/*
+	 * protect bfreg allocation data structs
+	 */
+	struct mutex		lock;
+	u32			ver;
+	bool			lib_uar_4k;
+	u32			num_sys_pages;
+};
+
+struct mlx5_cmd_first {
+	__be32		data[4];
+};
+
+struct mlx5_cmd_msg {
+	struct list_head		list;
+	struct cmd_msg_cache	       *parent;
+	u32				len;
+	struct mlx5_cmd_first		first;
+	struct mlx5_cmd_mailbox	       *next;
+};
+
+struct mlx5_cmd_debug {
+	struct dentry	       *dbg_root;
+	struct dentry	       *dbg_in;
+	struct dentry	       *dbg_out;
+	struct dentry	       *dbg_outlen;
+	struct dentry	       *dbg_status;
+	struct dentry	       *dbg_run;
+	void		       *in_msg;
+	void		       *out_msg;
+	u8			status;
+	u16			inlen;
+	u16			outlen;
+};
+
+struct cmd_msg_cache {
+	/* protect block chain allocations
+	 */
+	spinlock_t		lock;
+	struct list_head	head;
+	unsigned int		max_inbox_size;
+	unsigned int		num_ent;
+};
+
+enum {
+	MLX5_NUM_COMMAND_CACHES = 5,
+};
+
+struct mlx5_cmd_stats {
+	u64		sum;
+	u64		n;
+	struct dentry  *root;
+	struct dentry  *avg;
+	struct dentry  *count;
+	/* protect command average calculations */
+	spinlock_t	lock;
+};
+
+struct mlx5_cmd {
+	void	       *cmd_alloc_buf;
+	dma_addr_t	alloc_dma;
+	int		alloc_size;
+	void	       *cmd_buf;
+	dma_addr_t	dma;
+	u16		cmdif_rev;
+	u8		log_sz;
+	u8		log_stride;
+	int		max_reg_cmds;
+	int		events;
+	u32 __iomem    *vector;
+
+	/* protect command queue allocations
+	 */
+	spinlock_t	alloc_lock;
+
+	/* protect token allocations
+	 */
+	spinlock_t	token_lock;
+	u8		token;
+	unsigned long	bitmask;
+	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
+	struct workqueue_struct *wq;
+	struct semaphore sem;
+	struct semaphore pages_sem;
+	int	mode;
+	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
+	struct pci_pool *pool;
+	struct mlx5_cmd_debug dbg;
+	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
+	int checksum_disabled;
+	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
+};
+
+struct mlx5_port_caps {
+	int	gid_table_len;
+	int	pkey_table_len;
+	u8	ext_port_cap;
+};
+
+struct mlx5_cmd_mailbox {
+	void	       *buf;
+	dma_addr_t	dma;
+	struct mlx5_cmd_mailbox *next;
+};
+
+struct mlx5_buf_list {
+	void		       *buf;
+	dma_addr_t		map;
+};
+
+struct mlx5_buf {
+	struct mlx5_buf_list	direct;
+	int			npages;
+	int			size;
+	u8			page_shift;
+};
+
+struct mlx5_frag_buf {
+	struct mlx5_buf_list	*frags;
+	int			npages;
+	int			size;
+	u8			page_shift;
+};
+
+struct mlx5_eq_tasklet {
+	struct list_head list;
+	struct list_head process_list;
+	struct tasklet_struct task;
+	/* lock on completion tasklet list */
+	spinlock_t lock;
+};
+
+struct mlx5_eq_pagefault {
+	struct work_struct       work;
+	/* Pagefaults lock */
+	spinlock_t		 lock;
+	struct workqueue_struct *wq;
+	mempool_t		*pool;
+};
+
+struct mlx5_eq {
+	struct mlx5_core_dev   *dev;
+	__be32 __iomem	       *doorbell;
+	u32			cons_index;
+	struct mlx5_buf		buf;
+	int			size;
+	unsigned int		irqn;
+	u8			eqn;
+	int			nent;
+	u64			mask;
+	struct list_head	list;
+	int			index;
+	struct mlx5_rsc_debug	*dbg;
+	enum mlx5_eq_type	type;
+	union {
+		struct mlx5_eq_tasklet   tasklet_ctx;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+		struct mlx5_eq_pagefault pf_ctx;
+#endif
+	};
+};
+
+struct mlx5_core_psv {
+	u32	psv_idx;
+	struct psv_layout {
+		u32	pd;
+		u16	syndrome;
+		u16	reserved;
+		u16	bg;
+		u16	app_tag;
+		u32	ref_tag;
+	} psv;
+};
+
+struct mlx5_core_sig_ctx {
+	struct mlx5_core_psv	psv_memory;
+	struct mlx5_core_psv	psv_wire;
+	struct ib_sig_err       err_item;
+	bool			sig_status_checked;
+	bool			sig_err_exists;
+	u32			sigerr_count;
+};
+
+enum {
+	MLX5_MKEY_MR = 1,
+	MLX5_MKEY_MW,
+};
+
+struct mlx5_core_mkey {
+	u64			iova;
+	u64			size;
+	u32			key;
+	u32			pd;
+	u32			type;
+};
+
+#define MLX5_24BIT_MASK		((1 << 24) - 1)
+
+enum mlx5_res_type {
+	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
+	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
+	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
+	MLX5_RES_SRQ	= 3,
+	MLX5_RES_XSRQ	= 4,
+};
+
+struct mlx5_core_rsc_common {
+	enum mlx5_res_type	res;
+	atomic_t		refcount;
+	struct completion	free;
+};
+
+struct mlx5_core_srq {
+	struct mlx5_core_rsc_common	common; /* must be first */
+	u32		srqn;
+	int		max;
+	int		max_gs;
+	int		max_avail_gather;
+	int		wqe_shift;
+	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);
+
+	atomic_t		refcount;
+	struct completion	free;
+};
+
+struct mlx5_eq_table {
+	void __iomem	       *update_ci;
+	void __iomem	       *update_arm_ci;
+	struct list_head	comp_eqs_list;
+	struct mlx5_eq		pages_eq;
+	struct mlx5_eq		async_eq;
+	struct mlx5_eq		cmd_eq;
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	struct mlx5_eq		pfault_eq;
+#endif
+	int			num_comp_vectors;
+	/* protect EQs list
+	 */
+	spinlock_t		lock;
+};
+
+struct mlx5_uars_page {
+	void __iomem	       *map;
+	bool			wc;
+	u32			index;
+	struct list_head	list;
+	unsigned int		bfregs;
+	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
+	unsigned long	       *fp_bitmap;
+	unsigned int		reg_avail;
+	unsigned int		fp_avail;
+	struct kref		ref_count;
+	struct mlx5_core_dev   *mdev;
+};
+
+struct mlx5_bfreg_head {
+	/* protect blue flame registers allocations */
+	struct mutex		lock;
+	struct list_head	list;
+};
+
+struct mlx5_bfreg_data {
+	struct mlx5_bfreg_head	reg_head;
+	struct mlx5_bfreg_head	wc_head;
+};
+
+struct mlx5_sq_bfreg {
+	void __iomem	       *map;
+	struct mlx5_uars_page  *up;
+	bool			wc;
+	u32			index;
+	unsigned int		offset;
+};
+
+struct mlx5_core_health {
+	struct health_buffer __iomem   *health;
+	__be32 __iomem		       *health_counter;
+	struct timer_list		timer;
+	u32				prev;
+	int				miss_counter;
+	bool				sick;
+	/* wq spinlock to synchronize draining */
+	spinlock_t			wq_lock;
+	struct workqueue_struct	       *wq;
+	unsigned long			flags;
+	struct work_struct		work;
+	struct delayed_work		recover_work;
+};
+
+struct mlx5_cq_table {
+	/* protect radix tree
+	 */
+	spinlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
+struct mlx5_qp_table {
+	/* protect radix tree
+	 */
+	spinlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
+struct mlx5_srq_table {
+	/* protect radix tree
+	 */
+	spinlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
+struct mlx5_mkey_table {
+	/* protect radix tree
+	 */
+	rwlock_t		lock;
+	struct radix_tree_root	tree;
+};
+
+struct mlx5_vf_context {
+	int	enabled;
+};
+
+struct mlx5_core_sriov {
+	struct mlx5_vf_context	*vfs_ctx;
+	int			num_vfs;
+	int			enabled_vfs;
+};
+
+struct mlx5_irq_info {
+	cpumask_var_t mask;
+	char name[MLX5_MAX_IRQ_NAME];
+};
+
+struct mlx5_fc_stats {
+	struct rb_root counters;
+	struct list_head addlist;
+	/* protect addlist add/splice operations */
+	spinlock_t addlist_lock;
+
+	struct workqueue_struct *wq;
+	struct delayed_work work;
+	unsigned long next_query;
+};
+
+struct mlx5_eswitch;
+struct mlx5_lag;
+struct mlx5_pagefault;
+
+struct mlx5_rl_entry {
+	u32                     rate;
+	u16                     index;
+	u16                     refcount;
+};
+
+struct mlx5_rl_table {
+	/* protect rate limit table */
+	struct mutex            rl_lock;
+	u16                     max_size;
+	u32                     max_rate;
+	u32                     min_rate;
+	struct mlx5_rl_entry   *rl_entry;
+};
+
+enum port_module_event_status_type {
+	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
+	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+	MLX5_MODULE_STATUS_ERROR     = 0x3,
+	MLX5_MODULE_STATUS_NUM       = 0x3,
+};
+
+enum port_module_event_error_type {
+	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
+	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
+	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
+	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
+	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
+	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
+	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
+	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
+	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
+	MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_port_module_event_stats {
+	u64 status_counters[MLX5_MODULE_STATUS_NUM];
+	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
+struct mlx5_priv {
+	char			name[MLX5_MAX_NAME_LEN];
+	struct mlx5_eq_table	eq_table;
+	struct msix_entry	*msix_arr;
+	struct mlx5_irq_info	*irq_info;
+
+	/* pages stuff */
+	struct workqueue_struct *pg_wq;
+	struct rb_root		page_root;
+	int			fw_pages;
+	atomic_t		reg_pages;
+	struct list_head	free_list;
+	int			vfs_pages;
+
+	struct mlx5_core_health health;
+
+	struct mlx5_srq_table	srq_table;
+
+	/* start: qp stuff */
+	struct mlx5_qp_table	qp_table;
+	struct dentry	       *qp_debugfs;
+	struct dentry	       *eq_debugfs;
+	struct dentry	       *cq_debugfs;
+	struct dentry	       *cmdif_debugfs;
+	/* end: qp stuff */
+
+	/* start: cq stuff */
+	struct mlx5_cq_table	cq_table;
+	/* end: cq stuff */
+
+	/* start: mkey stuff */
+	struct mlx5_mkey_table	mkey_table;
+	/* end: mkey stuff */
+
+	/* start: alloc stuff */
+	/* protect buffer allocation according to numa node */
+	struct mutex            alloc_mutex;
+	int                     numa_node;
+
+	struct mutex            pgdir_mutex;
+	struct list_head        pgdir_list;
+	/* end: alloc stuff */
+	struct dentry	       *dbg_root;
+
+	/* protect mkey key part */
+	spinlock_t		mkey_lock;
+	u8			mkey_key;
+
+	struct list_head        dev_list;
+	struct list_head        ctx_list;
+	spinlock_t              ctx_lock;
+
+	struct mlx5_flow_steering *steering;
+	struct mlx5_eswitch     *eswitch;
+	struct mlx5_core_sriov	sriov;
+	struct mlx5_lag		*lag;
+	unsigned long		pci_dev_data;
+	struct mlx5_fc_stats		fc_stats;
+	struct mlx5_rl_table            rl_table;
+
+	struct mlx5_port_module_event_stats  pme_stats;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	void		      (*pfault)(struct mlx5_core_dev *dev,
+					void *context,
+					struct mlx5_pagefault *pfault);
+	void		       *pfault_ctx;
+	struct srcu_struct      pfault_srcu;
+#endif
+	struct mlx5_bfreg_data		bfregs;
+	struct mlx5_uars_page	       *uar;
+};
+
+enum mlx5_device_state {
+	MLX5_DEVICE_STATE_UP,
+	MLX5_DEVICE_STATE_INTERNAL_ERROR,
+};
+
+enum mlx5_interface_state {
+	MLX5_INTERFACE_STATE_DOWN = BIT(0),
+	MLX5_INTERFACE_STATE_UP = BIT(1),
+	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
+};
+
+enum mlx5_pci_status {
+	MLX5_PCI_STATUS_DISABLED,
+	MLX5_PCI_STATUS_ENABLED,
+};
+
+enum mlx5_pagefault_type_flags {
+	MLX5_PFAULT_REQUESTOR = 1 << 0,
+	MLX5_PFAULT_WRITE     = 1 << 1,
+	MLX5_PFAULT_RDMA      = 1 << 2,
+};
+
+/* Contains the details of a pagefault. */
+struct mlx5_pagefault {
+	u32			bytes_committed;
+	u32			token;
+	u8			event_subtype;
+	u8			type;
+	union {
+		/* Initiator or send message responder pagefault details. */
+		struct {
+			/* Received packet size, only valid for responders. */
+			u32	packet_size;
+			/*
+			 * Number of the resource holding the WQE;
+			 * meaning depends on type.
+			 */
+			u32	wq_num;
+			/*
+			 * WQE index. Refers to either the send queue or
+			 * receive queue, according to event_subtype.
+			 */
+			u16	wqe_index;
+		} wqe;
+		/* RDMA responder pagefault details */
+		struct {
+			u32	r_key;
+			/*
+			 * Received packet size; the minimal page fault
+			 * resolution size required for forward progress.
+			 */
+			u32	packet_size;
+			u32	rdma_op_len;
+			u64	rdma_va;
+		} rdma;
+	};
+
+	struct mlx5_eq	       *eq;
+	struct work_struct	work;
+};
+
+struct mlx5_td {
+	struct list_head tirs_list;
+	u32              tdn;
+};
+
+struct mlx5e_resources {
+	u32                        pdn;
+	struct mlx5_td             td;
+	struct mlx5_core_mkey      mkey;
+};
+
+struct mlx5_core_dev {
+	struct pci_dev	       *pdev;
+	/* sync pci state */
+	struct mutex		pci_status_mutex;
+	enum mlx5_pci_status	pci_status;
+	u8			rev_id;
+	char			board_id[MLX5_BOARD_ID_LEN];
+	struct mlx5_cmd		cmd;
+	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
+	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+	phys_addr_t		iseg_base;
+	struct mlx5_init_seg __iomem *iseg;
+	enum mlx5_device_state	state;
+	/* sync interface state */
+	struct mutex		intf_state_mutex;
+	unsigned long		intf_state;
+	void			(*event) (struct mlx5_core_dev *dev,
+					  enum mlx5_dev_event event,
+					  unsigned long param);
+	struct mlx5_priv	priv;
+	struct mlx5_profile	*profile;
+	atomic_t		num_qps;
+	u32			issi;
+	struct mlx5e_resources  mlx5e_res;
+#ifdef CONFIG_RFS_ACCEL
+	struct cpu_rmap         *rmap;
+#endif
+};
+
+struct mlx5_db {
+	__be32			*db;
+	union {
+		struct mlx5_db_pgdir		*pgdir;
+		struct mlx5_ib_user_db_page	*user_page;
+	}			u;
+	dma_addr_t		dma;
+	int			index;
+};
+
+enum {
+	MLX5_COMP_EQ_SIZE = 1024,
+};
+
+enum {
+	MLX5_PTYS_IB = 1 << 0,
+	MLX5_PTYS_EN = 1 << 2,
+};
+
+typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
+
+struct mlx5_cmd_work_ent {
+	struct mlx5_cmd_msg    *in;
+	struct mlx5_cmd_msg    *out;
+	void		       *uout;
+	int			uout_size;
+	mlx5_cmd_cbk_t		callback;
+	struct delayed_work	cb_timeout_work;
+	void		       *context;
+	int			idx;
+	struct completion	done;
+	struct mlx5_cmd        *cmd;
+	struct work_struct	work;
+	struct mlx5_cmd_layout *lay;
+	int			ret;
+	int			page_queue;
+	u8			status;
+	u8			token;
+	u64			ts1;
+	u64			ts2;
+	u16			op;
+};
+
+struct mlx5_pas {
+	u64	pa;
+	u8	log_sz;
+};
+
+enum port_state_policy {
+	MLX5_POLICY_DOWN	= 0,
+	MLX5_POLICY_UP		= 1,
+	MLX5_POLICY_FOLLOW	= 2,
+	MLX5_POLICY_INVALID	= 0xffffffff
+};
+
+enum phy_port_state {
+	MLX5_AAA_111
+};
+
+struct mlx5_hca_vport_context {
+	u32			field_select;
+	bool			sm_virt_aware;
+	bool			has_smi;
+	bool			has_raw;
+	enum port_state_policy	policy;
+	enum phy_port_state	phys_state;
+	enum ib_port_state	vport_state;
+	u8			port_physical_state;
+	u64			sys_image_guid;
+	u64			port_guid;
+	u64			node_guid;
+	u32			cap_mask1;
+	u32			cap_mask1_perm;
+	u32			cap_mask2;
+	u32			cap_mask2_perm;
+	u16			lid;
+	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
+	u8			lmc;
+	u8			subnet_timeout;
+	u16			sm_lid;
+	u8			sm_sl;
+	u16			qkey_violation_counter;
+	u16			pkey_violation_counter;
+	bool			grh_required;
+};
+
+static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+{
+	return buf->direct.buf + offset;
+}
+
+extern struct workqueue_struct *mlx5_core_wq;
+
+#define STRUCT_FIELD(header, field) \
+	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
+	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
+
+static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
+{
+	return pci_get_drvdata(pdev);
+}
+
+extern struct dentry *mlx5_debugfs_root;
+
+static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
+}
+
+static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->fw_rev) >> 16;
+}
+
+static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
+}
+
+static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
+}
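+
+/* Usage sketch (editor's note, not part of the patch): the firmware version
+ * printed at probe time is typically assembled from these accessors:
+ *
+ *	dev_info(&dev->pdev->dev, "firmware version: %d.%d.%d\n",
+ *		 fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
+ */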
+
+static inline void *mlx5_vzalloc(unsigned long size)
+{
+	void *rtn;
+
+	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!rtn)
+		rtn = vzalloc(size);
+	return rtn;
+}
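+
+/* Usage note (editor's note, not part of the patch): mlx5_vzalloc() tries
+ * kzalloc() first and falls back to vzalloc() for large buffers, so the
+ * result must be released with kvfree(), e.g.:
+ *
+ *	void *in = mlx5_vzalloc(inlen);
+ *
+ *	if (!in)
+ *		return -ENOMEM;
+ *	...
+ *	kvfree(in);
+ */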
+
+static inline u32 mlx5_base_mkey(const u32 key)
+{
+	return key & 0xffffff00u;
+}
+
+int mlx5_cmd_init(struct mlx5_core_dev *dev);
+void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+
+int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
+		  int out_size);
+int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
+		     void *out, int out_size, mlx5_cmd_cbk_t callback,
+		     void *context);
+void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
+int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
+int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
+void mlx5_health_cleanup(struct mlx5_core_dev *dev);
+int mlx5_health_init(struct mlx5_core_dev *dev);
+void mlx5_start_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
+int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			struct mlx5_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
+			     struct mlx5_frag_buf *buf, int node);
+void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
+struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
+						      gfp_t flags, int npages);
+void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
+				 struct mlx5_cmd_mailbox *head);
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_srq_attr *in);
+int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
+int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			struct mlx5_srq_attr *out);
+int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+		      u16 lwm, int is_srq);
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+			     struct mlx5_core_mkey *mkey,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen,
+			     mlx5_cmd_cbk_t callback, void *context);
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+			  struct mlx5_core_mkey *mkey,
+			  u32 *in, int inlen);
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+			   struct mlx5_core_mkey *mkey);
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
+			 u32 *out, int outlen);
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
+			     u32 *mkey);
+int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
+int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+		      u16 opmod, u8 port);
+void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
+int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
+void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
+void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
+				 s32 npages);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
+void mlx5_register_debugfs(void);
+void mlx5_unregister_debugfs(void);
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
+void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
+void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
+struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
+void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+		       int nent, u64 mask, const char *name,
+		       enum mlx5_eq_type type);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+int mlx5_stop_eqs(struct mlx5_core_dev *dev);
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+		    unsigned int *irqn);
+int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
+
+int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+			 int size_in, void *data_out, int size_out,
+			 u16 reg_num, int arg, int write);
+
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+		       u32 *out, int outlen);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
+int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
+		       int node);
+void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
+
+const char *mlx5_command_str(int command);
+int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+			 int npsvs, u32 *sig_index);
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
+int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
+			struct mlx5_odp_caps *odp_caps);
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+			     u8 port_num, void *out, size_t sz);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
+				u32 wq_num, u8 type, int error);
+#endif
+
+int mlx5_init_rl_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		     bool map_wc, bool fast_path);
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
+
+static inline int fw_initializing(struct mlx5_core_dev *dev)
+{
+	return ioread32be(&dev->iseg->initializing) >> 31;
+}
+
+static inline u32 mlx5_mkey_to_idx(u32 mkey)
+{
+	return mkey >> 8;
+}
+
+static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
+{
+	return mkey_idx << 8;
+}
+
+static inline u8 mlx5_mkey_variant(u32 mkey)
+{
+	return mkey & 0xff;
+}
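+
+/* Example (editor's note, not part of the patch): an mkey is a 24-bit index
+ * plus an 8-bit variant, so for any mkey the helpers above satisfy:
+ *
+ *	mkey == (mlx5_idx_to_mkey(mlx5_mkey_to_idx(mkey)) |
+ *		 mlx5_mkey_variant(mkey))
+ */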
+
+enum {
+	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
+	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
+};
+
+enum {
+	MAX_MR_CACHE_ENTRIES    = 21,
+};
+
+enum {
+	MLX5_INTERFACE_PROTOCOL_IB  = 0,
+	MLX5_INTERFACE_PROTOCOL_ETH = 1,
+};
+
+struct mlx5_interface {
+	void *			(*add)(struct mlx5_core_dev *dev);
+	void			(*remove)(struct mlx5_core_dev *dev, void *context);
+	int			(*attach)(struct mlx5_core_dev *dev, void *context);
+	void			(*detach)(struct mlx5_core_dev *dev, void *context);
+	void			(*event)(struct mlx5_core_dev *dev, void *context,
+					 enum mlx5_dev_event event, unsigned long param);
+	void			(*pfault)(struct mlx5_core_dev *dev,
+					  void *context,
+					  struct mlx5_pagefault *pfault);
+	void *                  (*get_dev)(void *context);
+	int			protocol;
+	struct list_head	list;
+};
+
+void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
+
+int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
+int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
+bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+
+struct mlx5_profile {
+	u64	mask;
+	u8	log_max_qp;
+	struct {
+		int	size;
+		int	limit;
+	} mr_cache[MAX_MR_CACHE_ENTRIES];
+};
+
+enum {
+	MLX5_PCI_DEV_IS_VF		= 1 << 0,
+};
+
+static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
+{
+	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
+}
+
+static inline int mlx5_get_gid_table_len(u16 param)
+{
+	if (param > 4) {
+		pr_warn("gid table length param %u out of range\n", param);
+		return 0;
+	}
+
+	return 8 * (1 << param);
+}
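+
+/* Example (editor's note, not part of the patch): param encodes the length
+ * as 8 << param, so valid values 0..4 map to GID table lengths of 8, 16,
+ * 32, 64 and 128 entries respectively.
+ */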
+
+static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
+{
+	return !!(dev->priv.rl_table.max_size);
+}
+
+enum {
+	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
+};
+
+#endif /* MLX5_DRIVER_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/fs.h b/drivers/net/ethernet/mellanox/mlx5/lib/fs.h
new file mode 100644
index 0000000..df43b14
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/fs.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_
+#define _MLX5_FS_
+
+#include "lib/driver.h"
+#include "lib/mlx5_ifc.h"
+
+#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
+
+enum {
+	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
+};
+
+enum {
+	MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
+};
+
+#define LEFTOVERS_RULE_NUM	 2
+static inline void build_leftovers_ft_param(int *priority,
+					    int *n_ent,
+					    int *n_grp)
+{
+	*priority = 0; /* Priority of leftovers_prio-0 */
+	*n_ent = LEFTOVERS_RULE_NUM;
+	*n_grp = LEFTOVERS_RULE_NUM;
+}
+
+enum mlx5_flow_namespace_type {
+	MLX5_FLOW_NAMESPACE_BYPASS,
+	MLX5_FLOW_NAMESPACE_LAG,
+	MLX5_FLOW_NAMESPACE_OFFLOADS,
+	MLX5_FLOW_NAMESPACE_ETHTOOL,
+	MLX5_FLOW_NAMESPACE_KERNEL,
+	MLX5_FLOW_NAMESPACE_LEFTOVERS,
+	MLX5_FLOW_NAMESPACE_ANCHOR,
+	MLX5_FLOW_NAMESPACE_FDB,
+	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+};
+
+struct mlx5_flow_table;
+struct mlx5_flow_group;
+struct mlx5_flow_namespace;
+struct mlx5_flow_handle;
+
+struct mlx5_flow_spec {
+	u8   match_criteria_enable;
+	u32  match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+	u32  match_value[MLX5_ST_SZ_DW(fte_match_param)];
+};
+
+struct mlx5_flow_destination {
+	enum mlx5_flow_destination_type	type;
+	union {
+		u32			tir_num;
+		struct mlx5_flow_table	*ft;
+		u32			vport_num;
+		struct mlx5_fc		*counter;
+	};
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+			enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+				    int prio,
+				    int num_flow_table_entries,
+				    int max_num_groups,
+				    u32 level,
+				    u32 flags);
+
+struct mlx5_flow_table *
+mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+		       int prio,
+		       int num_flow_table_entries,
+		       u32 level,
+		       u32 flags);
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+			     int prio,
+			     int num_flow_table_entries,
+			     u32 level, u16 vport);
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+					       struct mlx5_flow_namespace *ns,
+					       int prio, u32 level);
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
+
+/* inbox should be set with the following values:
+ * start_flow_index
+ * end_flow_index
+ * match_criteria_enable
+ * match_criteria
+ */
+struct mlx5_flow_group *
+mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
+
+struct mlx5_flow_act {
+	u32 action;
+	u32 flow_tag;
+	u32 encap_id;
+};
+
+/* Single destination per rule.
+ * Group ID is implied by the match criteria.
+ */
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    struct mlx5_flow_act *flow_act,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+			  u64 *bytes, u64 *packets, u64 *lastuse);
+
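+/* Usage sketch (editor's note, not part of the patch): a minimal add-rule
+ * sequence against the API above; error handling is elided and "ft" and
+ * "tirn" are hypothetical:
+ *
+ *	struct mlx5_flow_act act = {
+ *		.action   = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ *		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ *	};
+ *	struct mlx5_flow_destination dest = {
+ *		.type    = MLX5_FLOW_DESTINATION_TYPE_TIR,
+ *		.tir_num = tirn,
+ *	};
+ *	struct mlx5_flow_spec *spec = mlx5_vzalloc(sizeof(*spec));
+ *	struct mlx5_flow_handle *rule;
+ *
+ *	rule = mlx5_add_flow_rules(ft, spec, &act, &dest, 1);
+ *	mlx5_del_flow_rules(rule);
+ *	kvfree(spec);
+ */
+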
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/fw.c b/drivers/net/ethernet/mellanox/mlx5/lib/fw.c
new file mode 100644
index 0000000..7371ae0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/fw.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include <linux/module.h>
+#include "mlx5.h"
+
+static int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev, u32 *out,
+				  int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_adapter_in)] = {0};
+
+	MLX5_SET(query_adapter_in, in, opcode, MLX5_CMD_OP_QUERY_ADAPTER);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_query_board_id(struct mlx5_core_dev *dev)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_cmd_query_adapter(dev, out, outlen);
+	if (err)
+		goto out;
+
+	memcpy(dev->board_id,
+	       MLX5_ADDR_OF(query_adapter_out, out,
+			    query_adapter_struct.vsd_contd_psid),
+	       MLX5_FLD_SZ_BYTES(query_adapter_out,
+				 query_adapter_struct.vsd_contd_psid));
+
+out:
+	kfree(out);
+	return err;
+}
+
+int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_adapter_out);
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_cmd_query_adapter(mdev, out, outlen);
+	if (err)
+		goto out;
+
+	*vendor_id = MLX5_GET(query_adapter_out, out,
+			      query_adapter_struct.ieee_vendor_id);
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_query_vendor_id);
+
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
+{
+	int err;
+
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
+	if (err)
+		return err;
+
+	if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, pg)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, atomic)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, roce)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
+	    MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, vector_calc)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_VECTOR_CALC);
+		if (err)
+			return err;
+	}
+
+	if (MLX5_CAP_GEN(dev, qos)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
+{
+	u32 out[MLX5_ST_SZ_DW(init_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(init_hca_in)]   = {0};
+
+	MLX5_SET(init_hca_in, in, opcode, MLX5_CMD_OP_INIT_HCA);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
+{
+	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)]   = {0};
+
+	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/mad.c b/drivers/net/ethernet/mellanox/mlx5/lib/mad.c
new file mode 100644
index 0000000..6c85726
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/mad.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
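+/*
+ * Tunnel a MAD (management datagram) to the firmware: the request is
+ * copied into the MAD_IFC inbox, and the firmware's response MAD is
+ * copied back out into "outb".  The mlx5_ib driver uses this for
+ * SMP/GMP queries on IB ports.
+ */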
+int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+		      u16 opmod, u8 port)
+{
+	int outlen = MLX5_ST_SZ_BYTES(mad_ifc_out);
+	int inlen = MLX5_ST_SZ_BYTES(mad_ifc_in);
+	int err = -ENOMEM;
+	void *data;
+	void *resp;
+	u32 *out;
+	u32 *in;
+
+	in = kzalloc(inlen, GFP_KERNEL);
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!in || !out)
+		goto out;
+
+	MLX5_SET(mad_ifc_in, in, opcode, MLX5_CMD_OP_MAD_IFC);
+	MLX5_SET(mad_ifc_in, in, op_mod, opmod);
+	MLX5_SET(mad_ifc_in, in, port, port);
+
+	data = MLX5_ADDR_OF(mad_ifc_in, in, mad);
+	memcpy(data, inb, MLX5_FLD_SZ_BYTES(mad_ifc_in, mad));
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+	if (err)
+		goto out;
+
+	resp = MLX5_ADDR_OF(mad_ifc_out, out, response_mad_packet);
+	memcpy(outb, resp,
+	       MLX5_FLD_SZ_BYTES(mad_ifc_out, response_mad_packet));
+
+out:
+	kfree(out);
+	kfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_mad_ifc);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/mcg.c b/drivers/net/ethernet/mellanox/mlx5/lib/mcg.c
new file mode 100644
index 0000000..e231aa4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/mcg.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include <rdma/ib_verbs.h>
+#include "mlx5.h"
+
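+/*
+ * ATTACH_TO_MCG/DETACH_FROM_MCG add and remove a QP (by qpn) from the
+ * multicast group named by a 128-bit GID; mlx5_ib routes the IB core's
+ * attach/detach verbs through these helpers.
+ */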
+int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
+{
+	u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)]   = {0};
+	void *gid;
+
+	MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+	MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+	gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+	memcpy(gid, mgid, sizeof(*mgid));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_attach_mcg);
+
+int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
+{
+	u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)]   = {0};
+	void *gid;
+
+	MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+	MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+	gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+	memcpy(gid, mgid, sizeof(*mgid));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_detach_mcg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/mlx5_ifc.h b/drivers/net/ethernet/mellanox/mlx5/lib/mlx5_ifc.h
new file mode 100644
index 0000000..1223fef
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/mlx5_ifc.h
@@ -0,0 +1,8287 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef MLX5_IFC_H
+#define MLX5_IFC_H
+
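+/*
+ * Layout convention used throughout this file: each struct member is a
+ * u8 array whose bracketed size is the field width in *bits*, so every
+ * struct mirrors its firmware layout bit for bit, and reserved gaps are
+ * named reserved_at_<hex bit offset>.  Fields are never dereferenced
+ * directly; MLX5_SET()/MLX5_GET()/MLX5_ADDR_OF() and the size helpers
+ * MLX5_ST_SZ_DW()/MLX5_ST_SZ_BYTES() perform the offset arithmetic and
+ * endianness conversion.  For example, given "u8 log_max_qp[0x5];",
+ * MLX5_GET(cmd_hca_cap, out, log_max_qp) extracts those five bits from
+ * a command outbox.
+ */
+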
+enum {
+	MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS                   = 0x0,
+	MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED             = 0x1,
+	MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED           = 0x2,
+	MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED                  = 0x3,
+	MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED                    = 0x13,
+	MLX5_EVENT_TYPE_CODING_SRQ_LIMIT                           = 0x14,
+	MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED          = 0x1c,
+	MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION            = 0x1d,
+	MLX5_EVENT_TYPE_CODING_CQ_ERROR                            = 0x4,
+	MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR         = 0x5,
+	MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED               = 0x7,
+	MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT                    = 0xc,
+	MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR      = 0x10,
+	MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR     = 0x11,
+	MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR        = 0x12,
+	MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR                      = 0x8,
+	MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE                   = 0x9,
+	MLX5_EVENT_TYPE_CODING_GPIO_EVENT                          = 0x15,
+	MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
+	MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
+	MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
+	MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
+	MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
+	MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
+};
+
+enum {
+	MLX5_MODIFY_TIR_BITMASK_LRO                   = 0x0,
+	MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE        = 0x1,
+	MLX5_MODIFY_TIR_BITMASK_HASH                  = 0x2,
+	MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN   = 0x3
+};
+
+enum {
+	MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE        = 0x0,
+	MLX5_SET_HCA_CAP_OP_MOD_ATOMIC                = 0x3,
+};
+
+enum {
+	MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
+	MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
+	MLX5_CMD_OP_INIT_HCA                      = 0x102,
+	MLX5_CMD_OP_TEARDOWN_HCA                  = 0x103,
+	MLX5_CMD_OP_ENABLE_HCA                    = 0x104,
+	MLX5_CMD_OP_DISABLE_HCA                   = 0x105,
+	MLX5_CMD_OP_QUERY_PAGES                   = 0x107,
+	MLX5_CMD_OP_MANAGE_PAGES                  = 0x108,
+	MLX5_CMD_OP_SET_HCA_CAP                   = 0x109,
+	MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
+	MLX5_CMD_OP_SET_ISSI                      = 0x10b,
+	MLX5_CMD_OP_SET_DRIVER_VERSION            = 0x10d,
+	MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
+	MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
+	MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
+	MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS        = 0x203,
+	MLX5_CMD_OP_PAGE_FAULT_RESUME             = 0x204,
+	MLX5_CMD_OP_CREATE_EQ                     = 0x301,
+	MLX5_CMD_OP_DESTROY_EQ                    = 0x302,
+	MLX5_CMD_OP_QUERY_EQ                      = 0x303,
+	MLX5_CMD_OP_GEN_EQE                       = 0x304,
+	MLX5_CMD_OP_CREATE_CQ                     = 0x400,
+	MLX5_CMD_OP_DESTROY_CQ                    = 0x401,
+	MLX5_CMD_OP_QUERY_CQ                      = 0x402,
+	MLX5_CMD_OP_MODIFY_CQ                     = 0x403,
+	MLX5_CMD_OP_CREATE_QP                     = 0x500,
+	MLX5_CMD_OP_DESTROY_QP                    = 0x501,
+	MLX5_CMD_OP_RST2INIT_QP                   = 0x502,
+	MLX5_CMD_OP_INIT2RTR_QP                   = 0x503,
+	MLX5_CMD_OP_RTR2RTS_QP                    = 0x504,
+	MLX5_CMD_OP_RTS2RTS_QP                    = 0x505,
+	MLX5_CMD_OP_SQERR2RTS_QP                  = 0x506,
+	MLX5_CMD_OP_2ERR_QP                       = 0x507,
+	MLX5_CMD_OP_2RST_QP                       = 0x50a,
+	MLX5_CMD_OP_QUERY_QP                      = 0x50b,
+	MLX5_CMD_OP_SQD_RTS_QP                    = 0x50c,
+	MLX5_CMD_OP_INIT2INIT_QP                  = 0x50e,
+	MLX5_CMD_OP_CREATE_PSV                    = 0x600,
+	MLX5_CMD_OP_DESTROY_PSV                   = 0x601,
+	MLX5_CMD_OP_CREATE_SRQ                    = 0x700,
+	MLX5_CMD_OP_DESTROY_SRQ                   = 0x701,
+	MLX5_CMD_OP_QUERY_SRQ                     = 0x702,
+	MLX5_CMD_OP_ARM_RQ                        = 0x703,
+	MLX5_CMD_OP_CREATE_XRC_SRQ                = 0x705,
+	MLX5_CMD_OP_DESTROY_XRC_SRQ               = 0x706,
+	MLX5_CMD_OP_QUERY_XRC_SRQ                 = 0x707,
+	MLX5_CMD_OP_ARM_XRC_SRQ                   = 0x708,
+	MLX5_CMD_OP_CREATE_DCT                    = 0x710,
+	MLX5_CMD_OP_DESTROY_DCT                   = 0x711,
+	MLX5_CMD_OP_DRAIN_DCT                     = 0x712,
+	MLX5_CMD_OP_QUERY_DCT                     = 0x713,
+	MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION     = 0x714,
+	MLX5_CMD_OP_CREATE_XRQ                    = 0x717,
+	MLX5_CMD_OP_DESTROY_XRQ                   = 0x718,
+	MLX5_CMD_OP_QUERY_XRQ                     = 0x719,
+	MLX5_CMD_OP_ARM_XRQ                       = 0x71a,
+	MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
+	MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
+	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
+	MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT      = 0x753,
+	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT       = 0x754,
+	MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT      = 0x755,
+	MLX5_CMD_OP_QUERY_ROCE_ADDRESS            = 0x760,
+	MLX5_CMD_OP_SET_ROCE_ADDRESS              = 0x761,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT       = 0x762,
+	MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT      = 0x763,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_GID           = 0x764,
+	MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY          = 0x765,
+	MLX5_CMD_OP_QUERY_VPORT_COUNTER           = 0x770,
+	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
+	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
+	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
+	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
+	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
+	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT     = 0x782,
+	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT    = 0x783,
+	MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT      = 0x784,
+	MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT     = 0x785,
+	MLX5_CMD_OP_CREATE_QOS_PARA_VPORT         = 0x786,
+	MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT        = 0x787,
+	MLX5_CMD_OP_ALLOC_PD                      = 0x800,
+	MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
+	MLX5_CMD_OP_ALLOC_UAR                     = 0x802,
+	MLX5_CMD_OP_DEALLOC_UAR                   = 0x803,
+	MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
+	MLX5_CMD_OP_ACCESS_REG                    = 0x805,
+	MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
+	MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
+	MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
+	MLX5_CMD_OP_MAD_IFC                       = 0x50d,
+	MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
+	MLX5_CMD_OP_SET_MAD_DEMUX                 = 0x80c,
+	MLX5_CMD_OP_NOP                           = 0x80d,
+	MLX5_CMD_OP_ALLOC_XRCD                    = 0x80e,
+	MLX5_CMD_OP_DEALLOC_XRCD                  = 0x80f,
+	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN        = 0x816,
+	MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN      = 0x817,
+	MLX5_CMD_OP_QUERY_CONG_STATUS             = 0x822,
+	MLX5_CMD_OP_MODIFY_CONG_STATUS            = 0x823,
+	MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x824,
+	MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x825,
+	MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x826,
+	MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT           = 0x827,
+	MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT        = 0x828,
+	MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
+	MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
+	MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
+	MLX5_CMD_OP_SET_WOL_ROL                   = 0x830,
+	MLX5_CMD_OP_QUERY_WOL_ROL                 = 0x831,
+	MLX5_CMD_OP_CREATE_LAG                    = 0x840,
+	MLX5_CMD_OP_MODIFY_LAG                    = 0x841,
+	MLX5_CMD_OP_QUERY_LAG                     = 0x842,
+	MLX5_CMD_OP_DESTROY_LAG                   = 0x843,
+	MLX5_CMD_OP_CREATE_VPORT_LAG              = 0x844,
+	MLX5_CMD_OP_DESTROY_VPORT_LAG             = 0x845,
+	MLX5_CMD_OP_CREATE_TIR                    = 0x900,
+	MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
+	MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
+	MLX5_CMD_OP_QUERY_TIR                     = 0x903,
+	MLX5_CMD_OP_CREATE_SQ                     = 0x904,
+	MLX5_CMD_OP_MODIFY_SQ                     = 0x905,
+	MLX5_CMD_OP_DESTROY_SQ                    = 0x906,
+	MLX5_CMD_OP_QUERY_SQ                      = 0x907,
+	MLX5_CMD_OP_CREATE_RQ                     = 0x908,
+	MLX5_CMD_OP_MODIFY_RQ                     = 0x909,
+	MLX5_CMD_OP_DESTROY_RQ                    = 0x90a,
+	MLX5_CMD_OP_QUERY_RQ                      = 0x90b,
+	MLX5_CMD_OP_CREATE_RMP                    = 0x90c,
+	MLX5_CMD_OP_MODIFY_RMP                    = 0x90d,
+	MLX5_CMD_OP_DESTROY_RMP                   = 0x90e,
+	MLX5_CMD_OP_QUERY_RMP                     = 0x90f,
+	MLX5_CMD_OP_CREATE_TIS                    = 0x912,
+	MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
+	MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
+	MLX5_CMD_OP_QUERY_TIS                     = 0x915,
+	MLX5_CMD_OP_CREATE_RQT                    = 0x916,
+	MLX5_CMD_OP_MODIFY_RQT                    = 0x917,
+	MLX5_CMD_OP_DESTROY_RQT                   = 0x918,
+	MLX5_CMD_OP_QUERY_RQT                     = 0x919,
+	MLX5_CMD_OP_SET_FLOW_TABLE_ROOT           = 0x92f,
+	MLX5_CMD_OP_CREATE_FLOW_TABLE             = 0x930,
+	MLX5_CMD_OP_DESTROY_FLOW_TABLE            = 0x931,
+	MLX5_CMD_OP_QUERY_FLOW_TABLE              = 0x932,
+	MLX5_CMD_OP_CREATE_FLOW_GROUP             = 0x933,
+	MLX5_CMD_OP_DESTROY_FLOW_GROUP            = 0x934,
+	MLX5_CMD_OP_QUERY_FLOW_GROUP              = 0x935,
+	MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
+	MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
+	MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938,
+	MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
+	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
+	MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
+	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
+	MLX5_CMD_OP_ALLOC_ENCAP_HEADER            = 0x93d,
+	MLX5_CMD_OP_DEALLOC_ENCAP_HEADER          = 0x93e,
+	MLX5_CMD_OP_MAX
+};
+
+struct mlx5_ifc_flow_table_fields_supported_bits {
+	u8         outer_dmac[0x1];
+	u8         outer_smac[0x1];
+	u8         outer_ether_type[0x1];
+	u8         reserved_at_3[0x1];
+	u8         outer_first_prio[0x1];
+	u8         outer_first_cfi[0x1];
+	u8         outer_first_vid[0x1];
+	u8         reserved_at_7[0x1];
+	u8         outer_second_prio[0x1];
+	u8         outer_second_cfi[0x1];
+	u8         outer_second_vid[0x1];
+	u8         reserved_at_b[0x1];
+	u8         outer_sip[0x1];
+	u8         outer_dip[0x1];
+	u8         outer_frag[0x1];
+	u8         outer_ip_protocol[0x1];
+	u8         outer_ip_ecn[0x1];
+	u8         outer_ip_dscp[0x1];
+	u8         outer_udp_sport[0x1];
+	u8         outer_udp_dport[0x1];
+	u8         outer_tcp_sport[0x1];
+	u8         outer_tcp_dport[0x1];
+	u8         outer_tcp_flags[0x1];
+	u8         outer_gre_protocol[0x1];
+	u8         outer_gre_key[0x1];
+	u8         outer_vxlan_vni[0x1];
+	u8         reserved_at_1a[0x5];
+	u8         source_eswitch_port[0x1];
+
+	u8         inner_dmac[0x1];
+	u8         inner_smac[0x1];
+	u8         inner_ether_type[0x1];
+	u8         reserved_at_23[0x1];
+	u8         inner_first_prio[0x1];
+	u8         inner_first_cfi[0x1];
+	u8         inner_first_vid[0x1];
+	u8         reserved_at_27[0x1];
+	u8         inner_second_prio[0x1];
+	u8         inner_second_cfi[0x1];
+	u8         inner_second_vid[0x1];
+	u8         reserved_at_2b[0x1];
+	u8         inner_sip[0x1];
+	u8         inner_dip[0x1];
+	u8         inner_frag[0x1];
+	u8         inner_ip_protocol[0x1];
+	u8         inner_ip_ecn[0x1];
+	u8         inner_ip_dscp[0x1];
+	u8         inner_udp_sport[0x1];
+	u8         inner_udp_dport[0x1];
+	u8         inner_tcp_sport[0x1];
+	u8         inner_tcp_dport[0x1];
+	u8         inner_tcp_flags[0x1];
+	u8         reserved_at_37[0x9];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_flow_table_prop_layout_bits {
+	u8         ft_support[0x1];
+	u8         reserved_at_1[0x1];
+	u8         flow_counter[0x1];
+	u8         flow_modify_en[0x1];
+	u8         modify_root[0x1];
+	u8         identified_miss_table_mode[0x1];
+	u8         flow_table_modify[0x1];
+	u8         encap[0x1];
+	u8         decap[0x1];
+	u8         reserved_at_9[0x17];
+
+	u8         reserved_at_20[0x2];
+	u8         log_max_ft_size[0x6];
+	u8         reserved_at_28[0x10];
+	u8         max_ft_level[0x8];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x18];
+	u8         log_max_ft_num[0x8];
+
+	u8         reserved_at_80[0x18];
+	u8         log_max_destination[0x8];
+
+	u8         reserved_at_a0[0x18];
+	u8         log_max_flow[0x8];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
+
+	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
+};
+
+struct mlx5_ifc_odp_per_transport_service_cap_bits {
+	u8         send[0x1];
+	u8         receive[0x1];
+	u8         write[0x1];
+	u8         read[0x1];
+	u8         atomic[0x1];
+	u8         srq_receive[0x1];
+	u8         reserved_at_6[0x1a];
+};
+
+struct mlx5_ifc_ipv4_layout_bits {
+	u8         reserved_at_0[0x60];
+
+	u8         ipv4[0x20];
+};
+
+struct mlx5_ifc_ipv6_layout_bits {
+	u8         ipv6[16][0x8];
+};
+
+union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
+	struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
+	struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
+	u8         reserved_at_0[0x80];
+};
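+
+/*
+ * IPv4 and IPv6 addresses share one 128-bit slot in the match
+ * parameters; the ipv4 layout places the address in the low-order 32
+ * bits of that slot.
+ */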
+
+struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
+	u8         smac_47_16[0x20];
+
+	u8         smac_15_0[0x10];
+	u8         ethertype[0x10];
+
+	u8         dmac_47_16[0x20];
+
+	u8         dmac_15_0[0x10];
+	u8         first_prio[0x3];
+	u8         first_cfi[0x1];
+	u8         first_vid[0xc];
+
+	u8         ip_protocol[0x8];
+	u8         ip_dscp[0x6];
+	u8         ip_ecn[0x2];
+	u8         vlan_tag[0x1];
+	u8         reserved_at_91[0x1];
+	u8         frag[0x1];
+	u8         reserved_at_93[0x4];
+	u8         tcp_flags[0x9];
+
+	u8         tcp_sport[0x10];
+	u8         tcp_dport[0x10];
+
+	u8         reserved_at_c0[0x20];
+
+	u8         udp_sport[0x10];
+	u8         udp_dport[0x10];
+
+	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
+
+	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
+};
+
+struct mlx5_ifc_fte_match_set_misc_bits {
+	u8         reserved_at_0[0x8];
+	u8         source_sqn[0x18];
+
+	u8         reserved_at_20[0x10];
+	u8         source_port[0x10];
+
+	u8         outer_second_prio[0x3];
+	u8         outer_second_cfi[0x1];
+	u8         outer_second_vid[0xc];
+	u8         inner_second_prio[0x3];
+	u8         inner_second_cfi[0x1];
+	u8         inner_second_vid[0xc];
+
+	u8         outer_second_vlan_tag[0x1];
+	u8         inner_second_vlan_tag[0x1];
+	u8         reserved_at_62[0xe];
+	u8         gre_protocol[0x10];
+
+	u8         gre_key_h[0x18];
+	u8         gre_key_l[0x8];
+
+	u8         vxlan_vni[0x18];
+	u8         reserved_at_b8[0x8];
+
+	u8         reserved_at_c0[0x20];
+
+	u8         reserved_at_e0[0xc];
+	u8         outer_ipv6_flow_label[0x14];
+
+	u8         reserved_at_100[0xc];
+	u8         inner_ipv6_flow_label[0x14];
+
+	u8         reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_cmd_pas_bits {
+	u8         pa_h[0x20];
+
+	u8         pa_l[0x14];
+	u8         reserved_at_34[0xc];
+};
+
+struct mlx5_ifc_uint64_bits {
+	u8         hi[0x20];
+
+	u8         lo[0x20];
+};
+
+enum {
+	MLX5_ADS_STAT_RATE_NO_LIMIT  = 0x0,
+	MLX5_ADS_STAT_RATE_2_5GBPS   = 0x7,
+	MLX5_ADS_STAT_RATE_10GBPS    = 0x8,
+	MLX5_ADS_STAT_RATE_30GBPS    = 0x9,
+	MLX5_ADS_STAT_RATE_5GBPS     = 0xa,
+	MLX5_ADS_STAT_RATE_20GBPS    = 0xb,
+	MLX5_ADS_STAT_RATE_40GBPS    = 0xc,
+	MLX5_ADS_STAT_RATE_60GBPS    = 0xd,
+	MLX5_ADS_STAT_RATE_80GBPS    = 0xe,
+	MLX5_ADS_STAT_RATE_120GBPS   = 0xf,
+};
+
+struct mlx5_ifc_ads_bits {
+	u8         fl[0x1];
+	u8         free_ar[0x1];
+	u8         reserved_at_2[0xe];
+	u8         pkey_index[0x10];
+
+	u8         reserved_at_20[0x8];
+	u8         grh[0x1];
+	u8         mlid[0x7];
+	u8         rlid[0x10];
+
+	u8         ack_timeout[0x5];
+	u8         reserved_at_45[0x3];
+	u8         src_addr_index[0x8];
+	u8         reserved_at_50[0x4];
+	u8         stat_rate[0x4];
+	u8         hop_limit[0x8];
+
+	u8         reserved_at_60[0x4];
+	u8         tclass[0x8];
+	u8         flow_label[0x14];
+
+	u8         rgid_rip[16][0x8];
+
+	u8         reserved_at_100[0x4];
+	u8         f_dscp[0x1];
+	u8         f_ecn[0x1];
+	u8         reserved_at_106[0x1];
+	u8         f_eth_prio[0x1];
+	u8         ecn[0x2];
+	u8         dscp[0x6];
+	u8         udp_sport[0x10];
+
+	u8         dei_cfi[0x1];
+	u8         eth_prio[0x3];
+	u8         sl[0x4];
+	u8         port[0x8];
+	u8         rmac_47_32[0x10];
+
+	u8         rmac_31_0[0x20];
+};
+
+struct mlx5_ifc_flow_table_nic_cap_bits {
+	u8         nic_rx_multi_path_tirs[0x1];
+	u8         nic_rx_multi_path_tirs_fts[0x1];
+	u8         allow_sniffer_and_nic_rx_shared_tir[0x1];
+	u8         reserved_at_3[0x1fd];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
+
+	u8         reserved_at_400[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
+
+	u8         reserved_at_a00[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
+
+	u8         reserved_at_e00[0x7200];
+};
+
+struct mlx5_ifc_flow_table_eswitch_cap_bits {
+	u8         reserved_at_0[0x200];
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
+
+	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
+
+	u8         reserved_at_800[0x7800];
+};
+
+struct mlx5_ifc_e_switch_cap_bits {
+	u8         vport_svlan_strip[0x1];
+	u8         vport_cvlan_strip[0x1];
+	u8         vport_svlan_insert[0x1];
+	u8         vport_cvlan_insert_if_not_exist[0x1];
+	u8         vport_cvlan_insert_overwrite[0x1];
+	u8         reserved_at_5[0x19];
+	u8         nic_vport_node_guid_modify[0x1];
+	u8         nic_vport_port_guid_modify[0x1];
+
+	u8         vxlan_encap_decap[0x1];
+	u8         nvgre_encap_decap[0x1];
+	u8         reserved_at_22[0x9];
+	u8         log_max_encap_headers[0x5];
+	u8         reserved_at_2b[0x6];
+	u8         max_encap_header_size[0xa];
+
+	u8         reserved_at_40[0x7c0];
+};
+
+struct mlx5_ifc_qos_cap_bits {
+	u8         packet_pacing[0x1];
+	u8         esw_scheduling[0x1];
+	u8         reserved_at_2[0x1e];
+
+	u8         reserved_at_20[0x20];
+
+	u8         packet_pacing_max_rate[0x20];
+
+	u8         packet_pacing_min_rate[0x20];
+
+	u8         reserved_at_80[0x10];
+	u8         packet_pacing_rate_table_size[0x10];
+
+	u8         esw_element_type[0x10];
+	u8         esw_tsar_type[0x10];
+
+	u8         reserved_at_c0[0x10];
+	u8         max_qos_para_vport[0x10];
+
+	u8         max_tsar_bw_share[0x20];
+
+	u8         reserved_at_100[0x700];
+};
+
+struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+	u8         csum_cap[0x1];
+	u8         vlan_cap[0x1];
+	u8         lro_cap[0x1];
+	u8         lro_psh_flag[0x1];
+	u8         lro_time_stamp[0x1];
+	u8         reserved_at_5[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_at_9[0x2];
+	u8         max_lso_cap[0x5];
+	u8         multi_pkt_send_wqe[0x2];
+	u8         wqe_inline_mode[0x2];
+	u8         rss_ind_tbl_cap[0x4];
+	u8         reg_umr_sq[0x1];
+	u8         scatter_fcs[0x1];
+	u8         reserved_at_1a[0x1];
+	u8         tunnel_lso_const_out_ip_id[0x1];
+	u8         reserved_at_1c[0x2];
+	u8         tunnel_statless_gre[0x1];
+	u8         tunnel_stateless_vxlan[0x1];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         lro_min_mss_size[0x10];
+
+	u8         reserved_at_60[0x120];
+
+	u8         lro_timer_supported_periods[4][0x20];
+
+	u8         reserved_at_200[0x600];
+};
+
+struct mlx5_ifc_roce_cap_bits {
+	u8         roce_apm[0x1];
+	u8         reserved_at_1[0x1f];
+
+	u8         reserved_at_20[0x60];
+
+	u8         reserved_at_80[0xc];
+	u8         l3_type[0x4];
+	u8         reserved_at_90[0x8];
+	u8         roce_version[0x8];
+
+	u8         reserved_at_a0[0x10];
+	u8         r_roce_dest_udp_port[0x10];
+
+	u8         r_roce_max_src_udp_port[0x10];
+	u8         r_roce_min_src_udp_port[0x10];
+
+	u8         reserved_at_e0[0x10];
+	u8         roce_address_table_size[0x10];
+
+	u8         reserved_at_100[0x700];
+};
+
+enum {
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES    = 0x4,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES    = 0x8,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES   = 0x10,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES   = 0x20,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES   = 0x40,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES  = 0x80,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES  = 0x100,
+};
+
+enum {
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE     = 0x1,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES    = 0x2,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES    = 0x4,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES    = 0x8,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES   = 0x10,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES   = 0x20,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES   = 0x40,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES  = 0x80,
+	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES  = 0x100,
+};
+
+struct mlx5_ifc_atomic_caps_bits {
+	u8         reserved_at_0[0x40];
+
+	u8         atomic_req_8B_endianess_mode[0x2];
+	u8         reserved_at_42[0x4];
+	u8         supported_atomic_req_8B_endianess_mode_1[0x1];
+
+	u8         reserved_at_47[0x19];
+
+	u8         reserved_at_60[0x20];
+
+	u8         reserved_at_80[0x10];
+	u8         atomic_operations[0x10];
+
+	u8         reserved_at_a0[0x10];
+	u8         atomic_size_qp[0x10];
+
+	u8         reserved_at_c0[0x10];
+	u8         atomic_size_dc[0x10];
+
+	u8         reserved_at_e0[0x720];
+};
+
+struct mlx5_ifc_odp_cap_bits {
+	u8         reserved_at_0[0x40];
+
+	u8         sig[0x1];
+	u8         reserved_at_41[0x1f];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
+
+	struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
+
+	u8         reserved_at_e0[0x720];
+};
+
+struct mlx5_ifc_calc_op {
+	u8        reserved_at_0[0x10];
+	u8        reserved_at_10[0x9];
+	u8        op_swap_endianness[0x1];
+	u8        op_min[0x1];
+	u8        op_xor[0x1];
+	u8        op_or[0x1];
+	u8        op_and[0x1];
+	u8        op_max[0x1];
+	u8        op_add[0x1];
+};
+
+struct mlx5_ifc_vector_calc_cap_bits {
+	u8         calc_matrix[0x1];
+	u8         reserved_at_1[0x1f];
+	u8         reserved_at_20[0x8];
+	u8         max_vec_count[0x8];
+	u8         reserved_at_30[0xd];
+	u8         max_chunk_size[0x3];
+	struct mlx5_ifc_calc_op calc0;
+	struct mlx5_ifc_calc_op calc1;
+	struct mlx5_ifc_calc_op calc2;
+	struct mlx5_ifc_calc_op calc3;
+
+	u8         reserved_at_e0[0x720];
+};
+
+enum {
+	MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
+	MLX5_WQ_TYPE_CYCLIC       = 0x1,
+	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
+};
+
+enum {
+	MLX5_WQ_END_PAD_MODE_NONE   = 0x0,
+	MLX5_WQ_END_PAD_MODE_ALIGN  = 0x1,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES    = 0x0,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES   = 0x1,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES   = 0x2,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES   = 0x3,
+	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES  = 0x4,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES  = 0x0,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES  = 0x1,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES  = 0x2,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES   = 0x3,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES   = 0x4,
+	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES   = 0x5,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_PORT_TYPE_IB        = 0x0,
+	MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET  = 0x1,
+};
+
+enum {
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED       = 0x0,
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE  = 0x1,
+	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED        = 0x3,
+};
+
+enum {
+	MLX5_CAP_PORT_TYPE_IB  = 0x0,
+	MLX5_CAP_PORT_TYPE_ETH = 0x1,
+};
+
+struct mlx5_ifc_cmd_hca_cap_bits {
+	u8         reserved_at_0[0x80];
+
+	u8         log_max_srq_sz[0x8];
+	u8         log_max_qp_sz[0x8];
+	u8         reserved_at_90[0xb];
+	u8         log_max_qp[0x5];
+
+	u8         reserved_at_a0[0xb];
+	u8         log_max_srq[0x5];
+	u8         reserved_at_b0[0x10];
+
+	u8         reserved_at_c0[0x8];
+	u8         log_max_cq_sz[0x8];
+	u8         reserved_at_d0[0xb];
+	u8         log_max_cq[0x5];
+
+	u8         log_max_eq_sz[0x8];
+	u8         reserved_at_e8[0x2];
+	u8         log_max_mkey[0x6];
+	u8         reserved_at_f0[0xc];
+	u8         log_max_eq[0x4];
+
+	u8         max_indirection[0x8];
+	u8         fixed_buffer_size[0x1];
+	u8         log_max_mrw_sz[0x7];
+	u8         reserved_at_110[0x2];
+	u8         log_max_bsf_list_size[0x6];
+	u8         umr_extended_translation_offset[0x1];
+	u8         null_mkey[0x1];
+	u8         log_max_klm_list_size[0x6];
+
+	u8         reserved_at_120[0xa];
+	u8         log_max_ra_req_dc[0x6];
+	u8         reserved_at_130[0xa];
+	u8         log_max_ra_res_dc[0x6];
+
+	u8         reserved_at_140[0xa];
+	u8         log_max_ra_req_qp[0x6];
+	u8         reserved_at_150[0xa];
+	u8         log_max_ra_res_qp[0x6];
+
+	u8         pad_cap[0x1];
+	u8         cc_query_allowed[0x1];
+	u8         cc_modify_allowed[0x1];
+	u8         reserved_at_163[0xd];
+	u8         gid_table_size[0x10];
+
+	u8         out_of_seq_cnt[0x1];
+	u8         vport_counters[0x1];
+	u8         retransmission_q_counters[0x1];
+	u8         reserved_at_183[0x1];
+	u8         modify_rq_counter_set_id[0x1];
+	u8         reserved_at_185[0x1];
+	u8         max_qp_cnt[0xa];
+	u8         pkey_table_size[0x10];
+
+	u8         vport_group_manager[0x1];
+	u8         vhca_group_manager[0x1];
+	u8         ib_virt[0x1];
+	u8         eth_virt[0x1];
+	u8         reserved_at_1a4[0x1];
+	u8         ets[0x1];
+	u8         nic_flow_table[0x1];
+	u8         eswitch_flow_table[0x1];
+	u8         early_vf_enable[0x1];
+	u8         reserved_at_1a9[0x2];
+	u8         local_ca_ack_delay[0x5];
+	u8         port_module_event[0x1];
+	u8         reserved_at_1b1[0x1];
+	u8         ports_check[0x1];
+	u8         reserved_at_1b3[0x1];
+	u8         disable_link_up[0x1];
+	u8         beacon_led[0x1];
+	u8         port_type[0x2];
+	u8         num_ports[0x8];
+
+	u8         reserved_at_1c0[0x3];
+	u8         log_max_msg[0x5];
+	u8         reserved_at_1c8[0x4];
+	u8         max_tc[0x4];
+	u8         reserved_at_1d0[0x1];
+	u8         dcbx[0x1];
+	u8         reserved_at_1d2[0x4];
+	u8         rol_s[0x1];
+	u8         rol_g[0x1];
+	u8         reserved_at_1d8[0x1];
+	u8         wol_s[0x1];
+	u8         wol_g[0x1];
+	u8         wol_a[0x1];
+	u8         wol_b[0x1];
+	u8         wol_m[0x1];
+	u8         wol_u[0x1];
+	u8         wol_p[0x1];
+
+	u8         stat_rate_support[0x10];
+	u8         reserved_at_1f0[0xc];
+	u8         cqe_version[0x4];
+
+	u8         compact_address_vector[0x1];
+	u8         striding_rq[0x1];
+	u8         reserved_at_202[0x2];
+	u8         ipoib_basic_offloads[0x1];
+	u8         reserved_at_205[0xa];
+	u8         drain_sigerr[0x1];
+	u8         cmdif_checksum[0x2];
+	u8         sigerr_cqe[0x1];
+	u8         reserved_at_213[0x1];
+	u8         wq_signature[0x1];
+	u8         sctr_data_cqe[0x1];
+	u8         reserved_at_216[0x1];
+	u8         sho[0x1];
+	u8         tph[0x1];
+	u8         rf[0x1];
+	u8         dct[0x1];
+	u8         qos[0x1];
+	u8         eth_net_offloads[0x1];
+	u8         roce[0x1];
+	u8         atomic[0x1];
+	u8         reserved_at_21f[0x1];
+
+	u8         cq_oi[0x1];
+	u8         cq_resize[0x1];
+	u8         cq_moderation[0x1];
+	u8         reserved_at_223[0x3];
+	u8         cq_eq_remap[0x1];
+	u8         pg[0x1];
+	u8         block_lb_mc[0x1];
+	u8         reserved_at_229[0x1];
+	u8         scqe_break_moderation[0x1];
+	u8         cq_period_start_from_cqe[0x1];
+	u8         cd[0x1];
+	u8         reserved_at_22d[0x1];
+	u8         apm[0x1];
+	u8         vector_calc[0x1];
+	u8         umr_ptr_rlky[0x1];
+	u8         imaicl[0x1];
+	u8         reserved_at_232[0x4];
+	u8         qkv[0x1];
+	u8         pkv[0x1];
+	u8         set_deth_sqpn[0x1];
+	u8         reserved_at_239[0x3];
+	u8         xrc[0x1];
+	u8         ud[0x1];
+	u8         uc[0x1];
+	u8         rc[0x1];
+
+	u8         uar_4k[0x1];
+	u8         reserved_at_241[0x9];
+	u8         uar_sz[0x6];
+	u8         reserved_at_250[0x8];
+	u8         log_pg_sz[0x8];
+
+	u8         bf[0x1];
+	u8         driver_version[0x1];
+	u8         pad_tx_eth_packet[0x1];
+	u8         reserved_at_263[0x8];
+	u8         log_bf_reg_size[0x5];
+
+	u8         reserved_at_270[0xb];
+	u8         lag_master[0x1];
+	u8         num_lag_ports[0x4];
+
+	u8         reserved_at_280[0x10];
+	u8         max_wqe_sz_sq[0x10];
+
+	u8         reserved_at_2a0[0x10];
+	u8         max_wqe_sz_rq[0x10];
+
+	u8         reserved_at_2c0[0x10];
+	u8         max_wqe_sz_sq_dc[0x10];
+
+	u8         reserved_at_2e0[0x7];
+	u8         max_qp_mcg[0x19];
+
+	u8         reserved_at_300[0x18];
+	u8         log_max_mcg[0x8];
+
+	u8         reserved_at_320[0x3];
+	u8         log_max_transport_domain[0x5];
+	u8         reserved_at_328[0x3];
+	u8         log_max_pd[0x5];
+	u8         reserved_at_330[0xb];
+	u8         log_max_xrcd[0x5];
+
+	u8         reserved_at_340[0x8];
+	u8         log_max_flow_counter_bulk[0x8];
+	u8         max_flow_counter[0x10];
+
+	u8         reserved_at_360[0x3];
+	u8         log_max_rq[0x5];
+	u8         reserved_at_368[0x3];
+	u8         log_max_sq[0x5];
+	u8         reserved_at_370[0x3];
+	u8         log_max_tir[0x5];
+	u8         reserved_at_378[0x3];
+	u8         log_max_tis[0x5];
+
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         reserved_at_381[0x2];
+	u8         log_max_rmp[0x5];
+	u8         reserved_at_388[0x3];
+	u8         log_max_rqt[0x5];
+	u8         reserved_at_390[0x3];
+	u8         log_max_rqt_size[0x5];
+	u8         reserved_at_398[0x3];
+	u8         log_max_tis_per_sq[0x5];
+
+	u8         reserved_at_3a0[0x3];
+	u8         log_max_stride_sz_rq[0x5];
+	u8         reserved_at_3a8[0x3];
+	u8         log_min_stride_sz_rq[0x5];
+	u8         reserved_at_3b0[0x3];
+	u8         log_max_stride_sz_sq[0x5];
+	u8         reserved_at_3b8[0x3];
+	u8         log_min_stride_sz_sq[0x5];
+
+	u8         reserved_at_3c0[0x1b];
+	u8         log_max_wq_sz[0x5];
+
+	u8         nic_vport_change_event[0x1];
+	u8         reserved_at_3e1[0xa];
+	u8         log_max_vlan_list[0x5];
+	u8         reserved_at_3f0[0x3];
+	u8         log_max_current_mc_list[0x5];
+	u8         reserved_at_3f8[0x3];
+	u8         log_max_current_uc_list[0x5];
+
+	u8         reserved_at_400[0x80];
+
+	u8         reserved_at_480[0x3];
+	u8         log_max_l2_table[0x5];
+	u8         reserved_at_488[0x8];
+	u8         log_uar_page_sz[0x10];
+
+	u8         reserved_at_4a0[0x20];
+	u8         device_frequency_mhz[0x20];
+	u8         device_frequency_khz[0x20];
+
+	u8         reserved_at_500[0x20];
+	u8         num_of_uars_per_page[0x20];
+	u8         reserved_at_540[0x40];
+
+	u8         reserved_at_580[0x3f];
+	u8         cqe_compression[0x1];
+
+	u8         cqe_compression_timeout[0x10];
+	u8         cqe_compression_max_num[0x10];
+
+	u8         reserved_at_5e0[0x10];
+	u8         tag_matching[0x1];
+	u8         rndv_offload_rc[0x1];
+	u8         rndv_offload_dc[0x1];
+	u8         log_tag_matching_list_sz[0x5];
+	u8         reserved_at_5f8[0x3];
+	u8         log_max_xrq[0x5];
+
+	u8         reserved_at_600[0x200];
+};
+
+enum mlx5_flow_destination_type {
+	MLX5_FLOW_DESTINATION_TYPE_VPORT        = 0x0,
+	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE   = 0x1,
+	MLX5_FLOW_DESTINATION_TYPE_TIR          = 0x2,
+
+	MLX5_FLOW_DESTINATION_TYPE_COUNTER      = 0x100,
+};
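+
+/*
+ * COUNTER is deliberately outside the 8-bit destination_type field: a
+ * counter destination is not sent as a dest_format_struct but as a
+ * flow_counter_list entry in the same destination list (see the union
+ * below).
+ */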
+
+struct mlx5_ifc_dest_format_struct_bits {
+	u8         destination_type[0x8];
+	u8         destination_id[0x18];
+
+	u8         reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_flow_counter_list_bits {
+	u8         clear[0x1];
+	u8         num_of_counters[0xf];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_20[0x20];
+};
+
+union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
+	struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+	struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
+	u8         reserved_at_0[0x40];
+};
+
+struct mlx5_ifc_fte_match_param_bits {
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+
+	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+
+	u8         reserved_at_600[0xa00];
+};
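+
+/*
+ * fte_match_param serves both as the match-criteria mask of a flow
+ * group and as the match value of a flow table entry: a packet hits an
+ * entry when its masked header fields equal the masked value.
+ */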
+
+enum {
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
+	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
+};
+
+struct mlx5_ifc_rx_hash_field_select_bits {
+	u8         l3_prot_type[0x1];
+	u8         l4_prot_type[0x1];
+	u8         selected_fields[0x1e];
+};
+
+enum {
+	MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST  = 0x0,
+	MLX5_WQ_WQ_TYPE_WQ_CYCLIC       = 0x1,
+};
+
+enum {
+	MLX5_WQ_END_PADDING_MODE_END_PAD_NONE   = 0x0,
+	MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN  = 0x1,
+};
+
+struct mlx5_ifc_wq_bits {
+	u8         wq_type[0x4];
+	u8         wq_signature[0x1];
+	u8         end_padding_mode[0x2];
+	u8         cd_slave[0x1];
+	u8         reserved_at_8[0x18];
+
+	u8         hds_skip_first_sge[0x1];
+	u8         log2_hds_buf_size[0x3];
+	u8         reserved_at_24[0x7];
+	u8         page_offset[0x5];
+	u8         lwm[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_at_60[0x8];
+	u8         uar_page[0x18];
+
+	u8         dbr_addr[0x40];
+
+	u8         hw_counter[0x20];
+
+	u8         sw_counter[0x20];
+
+	u8         reserved_at_100[0xc];
+	u8         log_wq_stride[0x4];
+	u8         reserved_at_110[0x3];
+	u8         log_wq_pg_sz[0x5];
+	u8         reserved_at_118[0x3];
+	u8         log_wq_sz[0x5];
+
+	u8         reserved_at_120[0x15];
+	u8         log_wqe_num_of_strides[0x3];
+	u8         two_byte_shift_en[0x1];
+	u8         reserved_at_139[0x4];
+	u8         log_wqe_stride_size[0x3];
+
+	u8         reserved_at_140[0x4c0];
+
+	struct mlx5_ifc_cmd_pas_bits pas[0];
+};
+
+struct mlx5_ifc_rq_num_bits {
+	u8         reserved_at_0[0x8];
+	u8         rq_num[0x18];
+};
+
+struct mlx5_ifc_mac_address_layout_bits {
+	u8         reserved_at_0[0x10];
+	u8         mac_addr_47_32[0x10];
+
+	u8         mac_addr_31_0[0x20];
+};
+
+struct mlx5_ifc_vlan_layout_bits {
+	u8         reserved_at_0[0x14];
+	u8         vlan[0x0c];
+
+	u8         reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
+	u8         reserved_at_0[0xa0];
+
+	u8         min_time_between_cnps[0x20];
+
+	u8         reserved_at_c0[0x12];
+	u8         cnp_dscp[0x6];
+	u8         reserved_at_d8[0x5];
+	u8         cnp_802p_prio[0x3];
+
+	u8         reserved_at_e0[0x720];
+};
+
+struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
+	u8         reserved_at_0[0x60];
+
+	u8         reserved_at_60[0x4];
+	u8         clamp_tgt_rate[0x1];
+	u8         reserved_at_65[0x3];
+	u8         clamp_tgt_rate_after_time_inc[0x1];
+	u8         reserved_at_69[0x17];
+
+	u8         reserved_at_80[0x20];
+
+	u8         rpg_time_reset[0x20];
+
+	u8         rpg_byte_reset[0x20];
+
+	u8         rpg_threshold[0x20];
+
+	u8         rpg_max_rate[0x20];
+
+	u8         rpg_ai_rate[0x20];
+
+	u8         rpg_hai_rate[0x20];
+
+	u8         rpg_gd[0x20];
+
+	u8         rpg_min_dec_fac[0x20];
+
+	u8         rpg_min_rate[0x20];
+
+	u8         reserved_at_1c0[0xe0];
+
+	u8         rate_to_set_on_first_cnp[0x20];
+
+	u8         dce_tcp_g[0x20];
+
+	u8         dce_tcp_rtt[0x20];
+
+	u8         rate_reduce_monitor_period[0x20];
+
+	u8         reserved_at_320[0x20];
+
+	u8         initial_alpha_value[0x20];
+
+	u8         reserved_at_360[0x4a0];
+};
+
+struct mlx5_ifc_cong_control_802_1qau_rp_bits {
+	u8         reserved_at_0[0x80];
+
+	u8         rppp_max_rps[0x20];
+
+	u8         rpg_time_reset[0x20];
+
+	u8         rpg_byte_reset[0x20];
+
+	u8         rpg_threshold[0x20];
+
+	u8         rpg_max_rate[0x20];
+
+	u8         rpg_ai_rate[0x20];
+
+	u8         rpg_hai_rate[0x20];
+
+	u8         rpg_gd[0x20];
+
+	u8         rpg_min_dec_fac[0x20];
+
+	u8         rpg_min_rate[0x20];
+
+	u8         reserved_at_1c0[0x640];
+};
+
+enum {
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE    = 0x1,
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET    = 0x2,
+	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE  = 0x4,
+};
+
+struct mlx5_ifc_resize_field_select_bits {
+	u8         resize_field_select[0x20];
+};
+
+enum {
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD     = 0x1,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT  = 0x2,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI            = 0x4,
+	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN         = 0x8,
+};
+
+struct mlx5_ifc_modify_field_select_bits {
+	u8         modify_field_select[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_np_bits {
+	u8         field_select_r_roce_np[0x20];
+};
+
+struct mlx5_ifc_field_select_r_roce_rp_bits {
+	u8         field_select_r_roce_rp[0x20];
+};
+
+enum {
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS     = 0x4,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET   = 0x8,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET   = 0x10,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD    = 0x20,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE     = 0x40,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE      = 0x80,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE     = 0x100,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD           = 0x200,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC  = 0x400,
+	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE     = 0x800,
+};
+
+struct mlx5_ifc_field_select_802_1qau_rp_bits {
+	u8         field_select_8021qaurp[0x20];
+};
+
+struct mlx5_ifc_phys_layer_cntrs_bits {
+	u8         time_since_last_clear_high[0x20];
+
+	u8         time_since_last_clear_low[0x20];
+
+	u8         symbol_errors_high[0x20];
+
+	u8         symbol_errors_low[0x20];
+
+	u8         sync_headers_errors_high[0x20];
+
+	u8         sync_headers_errors_low[0x20];
+
+	u8         edpl_bip_errors_lane0_high[0x20];
+
+	u8         edpl_bip_errors_lane0_low[0x20];
+
+	u8         edpl_bip_errors_lane1_high[0x20];
+
+	u8         edpl_bip_errors_lane1_low[0x20];
+
+	u8         edpl_bip_errors_lane2_high[0x20];
+
+	u8         edpl_bip_errors_lane2_low[0x20];
+
+	u8         edpl_bip_errors_lane3_high[0x20];
+
+	u8         edpl_bip_errors_lane3_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane0_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane0_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane1_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane1_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane2_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane2_low[0x20];
+
+	u8         fc_fec_corrected_blocks_lane3_high[0x20];
+
+	u8         fc_fec_corrected_blocks_lane3_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane0_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane0_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane1_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane1_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane2_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane2_low[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane3_high[0x20];
+
+	u8         fc_fec_uncorrectable_blocks_lane3_low[0x20];
+
+	u8         rs_fec_corrected_blocks_high[0x20];
+
+	u8         rs_fec_corrected_blocks_low[0x20];
+
+	u8         rs_fec_uncorrectable_blocks_high[0x20];
+
+	u8         rs_fec_uncorrectable_blocks_low[0x20];
+
+	u8         rs_fec_no_errors_blocks_high[0x20];
+
+	u8         rs_fec_no_errors_blocks_low[0x20];
+
+	u8         rs_fec_single_error_blocks_high[0x20];
+
+	u8         rs_fec_single_error_blocks_low[0x20];
+
+	u8         rs_fec_corrected_symbols_total_high[0x20];
+
+	u8         rs_fec_corrected_symbols_total_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane0_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane0_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane1_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane1_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane2_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane2_low[0x20];
+
+	u8         rs_fec_corrected_symbols_lane3_high[0x20];
+
+	u8         rs_fec_corrected_symbols_lane3_low[0x20];
+
+	u8         link_down_events[0x20];
+
+	u8         successful_recovery_events[0x20];
+
+	u8         reserved_at_640[0x180];
+};
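+
+/*
+ * The *_high/*_low pairs above (and in the counter groups that follow)
+ * are the two 32-bit halves of 64-bit hardware counters; readers
+ * reassemble them as ((u64)high << 32) | low.
+ */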
+
+struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
+	u8         symbol_error_counter[0x10];
+
+	u8         link_error_recovery_counter[0x8];
+
+	u8         link_downed_counter[0x8];
+
+	u8         port_rcv_errors[0x10];
+
+	u8         port_rcv_remote_physical_errors[0x10];
+
+	u8         port_rcv_switch_relay_errors[0x10];
+
+	u8         port_xmit_discards[0x10];
+
+	u8         port_xmit_constraint_errors[0x8];
+
+	u8         port_rcv_constraint_errors[0x8];
+
+	u8         reserved_at_70[0x8];
+
+	u8         link_overrun_errors[0x8];
+
+	u8         reserved_at_80[0x10];
+
+	u8         vl_15_dropped[0x10];
+
+	u8         reserved_at_a0[0xa0];
+};
+
+struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
+	u8         transmit_queue_high[0x20];
+
+	u8         transmit_queue_low[0x20];
+
+	u8         reserved_at_40[0x780];
+};
+
+struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
+	u8         rx_octets_high[0x20];
+
+	u8         rx_octets_low[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	u8         rx_frames_high[0x20];
+
+	u8         rx_frames_low[0x20];
+
+	u8         tx_octets_high[0x20];
+
+	u8         tx_octets_low[0x20];
+
+	u8         reserved_at_180[0xc0];
+
+	u8         tx_frames_high[0x20];
+
+	u8         tx_frames_low[0x20];
+
+	u8         rx_pause_high[0x20];
+
+	u8         rx_pause_low[0x20];
+
+	u8         rx_pause_duration_high[0x20];
+
+	u8         rx_pause_duration_low[0x20];
+
+	u8         tx_pause_high[0x20];
+
+	u8         tx_pause_low[0x20];
+
+	u8         tx_pause_duration_high[0x20];
+
+	u8         tx_pause_duration_low[0x20];
+
+	u8         rx_pause_transition_high[0x20];
+
+	u8         rx_pause_transition_low[0x20];
+
+	u8         reserved_at_3c0[0x400];
+};
+
+struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
+	u8         port_transmit_wait_high[0x20];
+
+	u8         port_transmit_wait_low[0x20];
+
+	u8         reserved_at_40[0x780];
+};
+
+struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
+	u8         dot3stats_alignment_errors_high[0x20];
+
+	u8         dot3stats_alignment_errors_low[0x20];
+
+	u8         dot3stats_fcs_errors_high[0x20];
+
+	u8         dot3stats_fcs_errors_low[0x20];
+
+	u8         dot3stats_single_collision_frames_high[0x20];
+
+	u8         dot3stats_single_collision_frames_low[0x20];
+
+	u8         dot3stats_multiple_collision_frames_high[0x20];
+
+	u8         dot3stats_multiple_collision_frames_low[0x20];
+
+	u8         dot3stats_sqe_test_errors_high[0x20];
+
+	u8         dot3stats_sqe_test_errors_low[0x20];
+
+	u8         dot3stats_deferred_transmissions_high[0x20];
+
+	u8         dot3stats_deferred_transmissions_low[0x20];
+
+	u8         dot3stats_late_collisions_high[0x20];
+
+	u8         dot3stats_late_collisions_low[0x20];
+
+	u8         dot3stats_excessive_collisions_high[0x20];
+
+	u8         dot3stats_excessive_collisions_low[0x20];
+
+	u8         dot3stats_internal_mac_transmit_errors_high[0x20];
+
+	u8         dot3stats_internal_mac_transmit_errors_low[0x20];
+
+	u8         dot3stats_carrier_sense_errors_high[0x20];
+
+	u8         dot3stats_carrier_sense_errors_low[0x20];
+
+	u8         dot3stats_frame_too_longs_high[0x20];
+
+	u8         dot3stats_frame_too_longs_low[0x20];
+
+	u8         dot3stats_internal_mac_receive_errors_high[0x20];
+
+	u8         dot3stats_internal_mac_receive_errors_low[0x20];
+
+	u8         dot3stats_symbol_errors_high[0x20];
+
+	u8         dot3stats_symbol_errors_low[0x20];
+
+	u8         dot3control_in_unknown_opcodes_high[0x20];
+
+	u8         dot3control_in_unknown_opcodes_low[0x20];
+
+	u8         dot3in_pause_frames_high[0x20];
+
+	u8         dot3in_pause_frames_low[0x20];
+
+	u8         dot3out_pause_frames_high[0x20];
+
+	u8         dot3out_pause_frames_low[0x20];
+
+	u8         reserved_at_400[0x3c0];
+};
+
+struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
+	u8         ether_stats_drop_events_high[0x20];
+
+	u8         ether_stats_drop_events_low[0x20];
+
+	u8         ether_stats_octets_high[0x20];
+
+	u8         ether_stats_octets_low[0x20];
+
+	u8         ether_stats_pkts_high[0x20];
+
+	u8         ether_stats_pkts_low[0x20];
+
+	u8         ether_stats_broadcast_pkts_high[0x20];
+
+	u8         ether_stats_broadcast_pkts_low[0x20];
+
+	u8         ether_stats_multicast_pkts_high[0x20];
+
+	u8         ether_stats_multicast_pkts_low[0x20];
+
+	u8         ether_stats_crc_align_errors_high[0x20];
+
+	u8         ether_stats_crc_align_errors_low[0x20];
+
+	u8         ether_stats_undersize_pkts_high[0x20];
+
+	u8         ether_stats_undersize_pkts_low[0x20];
+
+	u8         ether_stats_oversize_pkts_high[0x20];
+
+	u8         ether_stats_oversize_pkts_low[0x20];
+
+	u8         ether_stats_fragments_high[0x20];
+
+	u8         ether_stats_fragments_low[0x20];
+
+	u8         ether_stats_jabbers_high[0x20];
+
+	u8         ether_stats_jabbers_low[0x20];
+
+	u8         ether_stats_collisions_high[0x20];
+
+	u8         ether_stats_collisions_low[0x20];
+
+	u8         ether_stats_pkts64octets_high[0x20];
+
+	u8         ether_stats_pkts64octets_low[0x20];
+
+	u8         ether_stats_pkts65to127octets_high[0x20];
+
+	u8         ether_stats_pkts65to127octets_low[0x20];
+
+	u8         ether_stats_pkts128to255octets_high[0x20];
+
+	u8         ether_stats_pkts128to255octets_low[0x20];
+
+	u8         ether_stats_pkts256to511octets_high[0x20];
+
+	u8         ether_stats_pkts256to511octets_low[0x20];
+
+	u8         ether_stats_pkts512to1023octets_high[0x20];
+
+	u8         ether_stats_pkts512to1023octets_low[0x20];
+
+	u8         ether_stats_pkts1024to1518octets_high[0x20];
+
+	u8         ether_stats_pkts1024to1518octets_low[0x20];
+
+	u8         ether_stats_pkts1519to2047octets_high[0x20];
+
+	u8         ether_stats_pkts1519to2047octets_low[0x20];
+
+	u8         ether_stats_pkts2048to4095octets_high[0x20];
+
+	u8         ether_stats_pkts2048to4095octets_low[0x20];
+
+	u8         ether_stats_pkts4096to8191octets_high[0x20];
+
+	u8         ether_stats_pkts4096to8191octets_low[0x20];
+
+	u8         ether_stats_pkts8192to10239octets_high[0x20];
+
+	u8         ether_stats_pkts8192to10239octets_low[0x20];
+
+	u8         reserved_at_540[0x280];
+};
+
+struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
+	u8         if_in_octets_high[0x20];
+
+	u8         if_in_octets_low[0x20];
+
+	u8         if_in_ucast_pkts_high[0x20];
+
+	u8         if_in_ucast_pkts_low[0x20];
+
+	u8         if_in_discards_high[0x20];
+
+	u8         if_in_discards_low[0x20];
+
+	u8         if_in_errors_high[0x20];
+
+	u8         if_in_errors_low[0x20];
+
+	u8         if_in_unknown_protos_high[0x20];
+
+	u8         if_in_unknown_protos_low[0x20];
+
+	u8         if_out_octets_high[0x20];
+
+	u8         if_out_octets_low[0x20];
+
+	u8         if_out_ucast_pkts_high[0x20];
+
+	u8         if_out_ucast_pkts_low[0x20];
+
+	u8         if_out_discards_high[0x20];
+
+	u8         if_out_discards_low[0x20];
+
+	u8         if_out_errors_high[0x20];
+
+	u8         if_out_errors_low[0x20];
+
+	u8         if_in_multicast_pkts_high[0x20];
+
+	u8         if_in_multicast_pkts_low[0x20];
+
+	u8         if_in_broadcast_pkts_high[0x20];
+
+	u8         if_in_broadcast_pkts_low[0x20];
+
+	u8         if_out_multicast_pkts_high[0x20];
+
+	u8         if_out_multicast_pkts_low[0x20];
+
+	u8         if_out_broadcast_pkts_high[0x20];
+
+	u8         if_out_broadcast_pkts_low[0x20];
+
+	u8         reserved_at_340[0x480];
+};
+
+struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
+	u8         a_frames_transmitted_ok_high[0x20];
+
+	u8         a_frames_transmitted_ok_low[0x20];
+
+	u8         a_frames_received_ok_high[0x20];
+
+	u8         a_frames_received_ok_low[0x20];
+
+	u8         a_frame_check_sequence_errors_high[0x20];
+
+	u8         a_frame_check_sequence_errors_low[0x20];
+
+	u8         a_alignment_errors_high[0x20];
+
+	u8         a_alignment_errors_low[0x20];
+
+	u8         a_octets_transmitted_ok_high[0x20];
+
+	u8         a_octets_transmitted_ok_low[0x20];
+
+	u8         a_octets_received_ok_high[0x20];
+
+	u8         a_octets_received_ok_low[0x20];
+
+	u8         a_multicast_frames_xmitted_ok_high[0x20];
+
+	u8         a_multicast_frames_xmitted_ok_low[0x20];
+
+	u8         a_broadcast_frames_xmitted_ok_high[0x20];
+
+	u8         a_broadcast_frames_xmitted_ok_low[0x20];
+
+	u8         a_multicast_frames_received_ok_high[0x20];
+
+	u8         a_multicast_frames_received_ok_low[0x20];
+
+	u8         a_broadcast_frames_received_ok_high[0x20];
+
+	u8         a_broadcast_frames_received_ok_low[0x20];
+
+	u8         a_in_range_length_errors_high[0x20];
+
+	u8         a_in_range_length_errors_low[0x20];
+
+	u8         a_out_of_range_length_field_high[0x20];
+
+	u8         a_out_of_range_length_field_low[0x20];
+
+	u8         a_frame_too_long_errors_high[0x20];
+
+	u8         a_frame_too_long_errors_low[0x20];
+
+	u8         a_symbol_error_during_carrier_high[0x20];
+
+	u8         a_symbol_error_during_carrier_low[0x20];
+
+	u8         a_mac_control_frames_transmitted_high[0x20];
+
+	u8         a_mac_control_frames_transmitted_low[0x20];
+
+	u8         a_mac_control_frames_received_high[0x20];
+
+	u8         a_mac_control_frames_received_low[0x20];
+
+	u8         a_unsupported_opcodes_received_high[0x20];
+
+	u8         a_unsupported_opcodes_received_low[0x20];
+
+	u8         a_pause_mac_ctrl_frames_received_high[0x20];
+
+	u8         a_pause_mac_ctrl_frames_received_low[0x20];
+
+	u8         a_pause_mac_ctrl_frames_transmitted_high[0x20];
+
+	u8         a_pause_mac_ctrl_frames_transmitted_low[0x20];
+
+	u8         reserved_at_4c0[0x300];
+};
+
+struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
+	u8         life_time_counter_high[0x20];
+
+	u8         life_time_counter_low[0x20];
+
+	u8         rx_errors[0x20];
+
+	u8         tx_errors[0x20];
+
+	u8         l0_to_recovery_eieos[0x20];
+
+	u8         l0_to_recovery_ts[0x20];
+
+	u8         l0_to_recovery_framing[0x20];
+
+	u8         l0_to_recovery_retrain[0x20];
+
+	u8         crc_error_dllp[0x20];
+
+	u8         crc_error_tlp[0x20];
+
+	u8         reserved_at_140[0x680];
+};
+
+struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
+	u8         life_time_counter_high[0x20];
+
+	u8         life_time_counter_low[0x20];
+
+	u8         time_to_boot_image_start[0x20];
+
+	u8         time_to_link_image[0x20];
+
+	u8         calibration_time[0x20];
+
+	u8         time_to_first_perst[0x20];
+
+	u8         time_to_detect_state[0x20];
+
+	u8         time_to_l0[0x20];
+
+	u8         time_to_crs_en[0x20];
+
+	u8         time_to_plastic_image_start[0x20];
+
+	u8         time_to_iron_image_start[0x20];
+
+	u8         perst_handler[0x20];
+
+	u8         times_in_l1[0x20];
+
+	u8         times_in_l23[0x20];
+
+	u8         dl_down[0x20];
+
+	u8         config_cycle1usec[0x20];
+
+	u8         config_cycle2to7usec[0x20];
+
+	u8         config_cycle_8to15usec[0x20];
+
+	u8         config_cycle_16_to_63usec[0x20];
+
+	u8         config_cycle_64usec[0x20];
+
+	u8         correctable_err_msg_sent[0x20];
+
+	u8         non_fatal_err_msg_sent[0x20];
+
+	u8         fatal_err_msg_sent[0x20];
+
+	u8         reserved_at_2e0[0x4e0];
+};
+
+struct mlx5_ifc_cmd_inter_comp_event_bits {
+	u8         command_completion_vector[0x20];
+
+	u8         reserved_at_20[0xc0];
+};
+
+struct mlx5_ifc_stall_vl_event_bits {
+	u8         reserved_at_0[0x18];
+	u8         port_num[0x1];
+	u8         reserved_at_19[0x3];
+	u8         vl[0x4];
+
+	u8         reserved_at_20[0xa0];
+};
+
+struct mlx5_ifc_db_bf_congestion_event_bits {
+	u8         event_subtype[0x8];
+	u8         reserved_at_8[0x8];
+	u8         congestion_level[0x8];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0xa0];
+};
+
+struct mlx5_ifc_gpio_event_bits {
+	u8         reserved_at_0[0x60];
+
+	u8         gpio_event_hi[0x20];
+
+	u8         gpio_event_lo[0x20];
+
+	u8         reserved_at_a0[0x40];
+};
+
+struct mlx5_ifc_port_state_change_event_bits {
+	u8         reserved_at_0[0x40];
+
+	u8         port_num[0x4];
+	u8         reserved_at_44[0x1c];
+
+	u8         reserved_at_60[0x80];
+};
+
+struct mlx5_ifc_dropped_packet_logged_bits {
+	u8         reserved_at_0[0xe0];
+};
+
+enum {
+	MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
+	MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,
+};
+
+struct mlx5_ifc_cq_error_bits {
+	u8         reserved_at_0[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x18];
+	u8         syndrome[0x8];
+
+	u8         reserved_at_60[0x80];
+};
+
+struct mlx5_ifc_rdma_page_fault_event_bits {
+	u8         bytes_committed[0x20];
+
+	u8         r_key[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         packet_len[0x10];
+
+	u8         rdma_op_len[0x20];
+
+	u8         rdma_va[0x40];
+
+	u8         reserved_at_c0[0x5];
+	u8         rdma[0x1];
+	u8         write[0x1];
+	u8         requestor[0x1];
+	u8         qp_number[0x18];
+};
+
+struct mlx5_ifc_wqe_associated_page_fault_event_bits {
+	u8         bytes_committed[0x20];
+
+	u8         reserved_at_20[0x10];
+	u8         wqe_index[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         len[0x10];
+
+	u8         reserved_at_60[0x60];
+
+	u8         reserved_at_c0[0x5];
+	u8         rdma[0x1];
+	u8         write_read[0x1];
+	u8         requestor[0x1];
+	u8         qpn[0x18];
+};
+
+struct mlx5_ifc_qp_events_bits {
+	u8         reserved_at_0[0xa0];
+
+	u8         type[0x8];
+	u8         reserved_at_a8[0x18];
+
+	u8         reserved_at_c0[0x8];
+	u8         qpn_rqn_sqn[0x18];
+};
+
+struct mlx5_ifc_dct_events_bits {
+	u8         reserved_at_0[0xc0];
+
+	u8         reserved_at_c0[0x8];
+	u8         dct_number[0x18];
+};
+
+struct mlx5_ifc_comp_event_bits {
+	u8         reserved_at_0[0xc0];
+
+	u8         reserved_at_c0[0x8];
+	u8         cq_number[0x18];
+};
+
+enum {
+	MLX5_QPC_STATE_RST        = 0x0,
+	MLX5_QPC_STATE_INIT       = 0x1,
+	MLX5_QPC_STATE_RTR        = 0x2,
+	MLX5_QPC_STATE_RTS        = 0x3,
+	MLX5_QPC_STATE_SQER       = 0x4,
+	MLX5_QPC_STATE_ERR        = 0x6,
+	MLX5_QPC_STATE_SQD        = 0x7,
+	MLX5_QPC_STATE_SUSPENDED  = 0x9,
+};
+
+enum {
+	MLX5_QPC_ST_RC            = 0x0,
+	MLX5_QPC_ST_UC            = 0x1,
+	MLX5_QPC_ST_UD            = 0x2,
+	MLX5_QPC_ST_XRC           = 0x3,
+	MLX5_QPC_ST_DCI           = 0x5,
+	MLX5_QPC_ST_QP0           = 0x7,
+	MLX5_QPC_ST_QP1           = 0x8,
+	MLX5_QPC_ST_RAW_DATAGRAM  = 0x9,
+	MLX5_QPC_ST_REG_UMR       = 0xc,
+};
+
+enum {
+	MLX5_QPC_PM_STATE_ARMED     = 0x0,
+	MLX5_QPC_PM_STATE_REARM     = 0x1,
+	MLX5_QPC_PM_STATE_RESERVED  = 0x2,
+	MLX5_QPC_PM_STATE_MIGRATED  = 0x3,
+};
+
+enum {
+	MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS                = 0x0,
+	MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT  = 0x1,
+};
+
+enum {
+	MLX5_QPC_MTU_256_BYTES        = 0x1,
+	MLX5_QPC_MTU_512_BYTES        = 0x2,
+	MLX5_QPC_MTU_1K_BYTES         = 0x3,
+	MLX5_QPC_MTU_2K_BYTES         = 0x4,
+	MLX5_QPC_MTU_4K_BYTES         = 0x5,
+	MLX5_QPC_MTU_RAW_ETHERNET_QP  = 0x7,
+};
+
+enum {
+	MLX5_QPC_ATOMIC_MODE_IB_SPEC     = 0x1,
+	MLX5_QPC_ATOMIC_MODE_ONLY_8B     = 0x2,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_8B    = 0x3,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_16B   = 0x4,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_32B   = 0x5,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_64B   = 0x6,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_128B  = 0x7,
+	MLX5_QPC_ATOMIC_MODE_UP_TO_256B  = 0x8,
+};
+
+enum {
+	MLX5_QPC_CS_REQ_DISABLE    = 0x0,
+	MLX5_QPC_CS_REQ_UP_TO_32B  = 0x11,
+	MLX5_QPC_CS_REQ_UP_TO_64B  = 0x22,
+};
+
+enum {
+	MLX5_QPC_CS_RES_DISABLE    = 0x0,
+	MLX5_QPC_CS_RES_UP_TO_32B  = 0x1,
+	MLX5_QPC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+struct mlx5_ifc_qpc_bits {
+	u8         state[0x4];
+	u8         lag_tx_port_affinity[0x4];
+	u8         st[0x8];
+	u8         reserved_at_10[0x3];
+	u8         pm_state[0x2];
+	u8         reserved_at_15[0x7];
+	u8         end_padding_mode[0x2];
+	u8         reserved_at_1e[0x2];
+
+	u8         wq_signature[0x1];
+	u8         block_lb_mc[0x1];
+	u8         atomic_like_write_en[0x1];
+	u8         latency_sensitive[0x1];
+	u8         reserved_at_24[0x1];
+	u8         drain_sigerr[0x1];
+	u8         reserved_at_26[0x2];
+	u8         pd[0x18];
+
+	u8         mtu[0x3];
+	u8         log_msg_max[0x5];
+	u8         reserved_at_48[0x1];
+	u8         log_rq_size[0x4];
+	u8         log_rq_stride[0x3];
+	u8         no_sq[0x1];
+	u8         log_sq_size[0x4];
+	u8         reserved_at_55[0x6];
+	u8         rlky[0x1];
+	u8         ulp_stateless_offload_mode[0x4];
+
+	u8         counter_set_id[0x8];
+	u8         uar_page[0x18];
+
+	u8         reserved_at_80[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_at_a0[0x3];
+	u8         log_page_size[0x5];
+	u8         remote_qpn[0x18];
+
+	struct mlx5_ifc_ads_bits primary_address_path;
+
+	struct mlx5_ifc_ads_bits secondary_address_path;
+
+	u8         log_ack_req_freq[0x4];
+	u8         reserved_at_384[0x4];
+	u8         log_sra_max[0x3];
+	u8         reserved_at_38b[0x2];
+	u8         retry_count[0x3];
+	u8         rnr_retry[0x3];
+	u8         reserved_at_393[0x1];
+	u8         fre[0x1];
+	u8         cur_rnr_retry[0x3];
+	u8         cur_retry_count[0x3];
+	u8         reserved_at_39b[0x5];
+
+	u8         reserved_at_3a0[0x20];
+
+	u8         reserved_at_3c0[0x8];
+	u8         next_send_psn[0x18];
+
+	u8         reserved_at_3e0[0x8];
+	u8         cqn_snd[0x18];
+
+	u8         reserved_at_400[0x8];
+	u8         deth_sqpn[0x18];
+
+	u8         reserved_at_420[0x20];
+
+	u8         reserved_at_440[0x8];
+	u8         last_acked_psn[0x18];
+
+	u8         reserved_at_460[0x8];
+	u8         ssn[0x18];
+
+	u8         reserved_at_480[0x8];
+	u8         log_rra_max[0x3];
+	u8         reserved_at_48b[0x1];
+	u8         atomic_mode[0x4];
+	u8         rre[0x1];
+	u8         rwe[0x1];
+	u8         rae[0x1];
+	u8         reserved_at_493[0x1];
+	u8         page_offset[0x6];
+	u8         reserved_at_49a[0x3];
+	u8         cd_slave_receive[0x1];
+	u8         cd_slave_send[0x1];
+	u8         cd_master[0x1];
+
+	u8         reserved_at_4a0[0x3];
+	u8         min_rnr_nak[0x5];
+	u8         next_rcv_psn[0x18];
+
+	u8         reserved_at_4c0[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_at_4e0[0x8];
+	u8         cqn_rcv[0x18];
+
+	u8         dbr_addr[0x40];
+
+	u8         q_key[0x20];
+
+	u8         reserved_at_560[0x5];
+	u8         rq_type[0x3];
+	u8         srqn_rmpn_xrqn[0x18];
+
+	u8         reserved_at_580[0x8];
+	u8         rmsn[0x18];
+
+	u8         hw_sq_wqebb_counter[0x10];
+	u8         sw_sq_wqebb_counter[0x10];
+
+	u8         hw_rq_counter[0x20];
+
+	u8         sw_rq_counter[0x20];
+
+	u8         reserved_at_600[0x20];
+
+	u8         reserved_at_620[0xf];
+	u8         cgs[0x1];
+	u8         cs_req[0x8];
+	u8         cs_res[0x8];
+
+	u8         dc_access_key[0x40];
+
+	u8         reserved_at_680[0xc0];
+};
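+
+/*
+ * A minimal sketch of filling a qpc mailbox with the accessor macros,
+ * assuming `qpc` was obtained with MLX5_ADDR_OF() into the embedding
+ * command buffer and `pdn` is a previously allocated PD number (the
+ * exact fields depend on the QP state transition):
+ *
+ *	MLX5_SET(qpc, qpc, st, MLX5_QPC_ST_RC);
+ *	MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+ *	MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_4K_BYTES);
+ *	MLX5_SET(qpc, qpc, pd, pdn);
+ */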
+
+struct mlx5_ifc_roce_addr_layout_bits {
+	u8         source_l3_address[16][0x8];
+
+	u8         reserved_at_80[0x3];
+	u8         vlan_valid[0x1];
+	u8         vlan_id[0xc];
+	u8         source_mac_47_32[0x10];
+
+	u8         source_mac_31_0[0x20];
+
+	u8         reserved_at_c0[0x14];
+	u8         roce_l3_type[0x4];
+	u8         roce_version[0x8];
+
+	u8         reserved_at_e0[0x20];
+};
+
+union mlx5_ifc_hca_cap_union_bits {
+	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
+	struct mlx5_ifc_odp_cap_bits odp_cap;
+	struct mlx5_ifc_atomic_caps_bits atomic_caps;
+	struct mlx5_ifc_roce_cap_bits roce_cap;
+	struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
+	struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
+	struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
+	struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
+	struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
+	struct mlx5_ifc_qos_cap_bits qos_cap;
+	u8         reserved_at_0[0x8000];
+};
+
+enum {
+	MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
+	MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
+	MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
+	MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
+	MLX5_FLOW_CONTEXT_ACTION_ENCAP     = 0x10,
+	MLX5_FLOW_CONTEXT_ACTION_DECAP     = 0x20,
+};
+
+struct mlx5_ifc_flow_context_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         group_id[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         flow_tag[0x18];
+
+	u8         reserved_at_60[0x10];
+	u8         action[0x10];
+
+	u8         reserved_at_80[0x8];
+	u8         destination_list_size[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         flow_counter_list_size[0x18];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_e0[0x120];
+
+	struct mlx5_ifc_fte_match_param_bits match_value;
+
+	u8         reserved_at_1200[0x600];
+
+	union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+};
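+
+/*
+ * flow_context.action is a bitmask of the MLX5_FLOW_CONTEXT_ACTION_*
+ * values above.  Sketch of programming a forward+count entry, with
+ * `in` being a set_fte_in mailbox (layout further below):
+ *
+ *	void *fc = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ *
+ *	MLX5_SET(flow_context, fc, action,
+ *		 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ *		 MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ *	MLX5_SET(flow_context, fc, destination_list_size, 1);
+ */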
+
+enum {
+	MLX5_XRC_SRQC_STATE_GOOD   = 0x0,
+	MLX5_XRC_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_xrc_srqc_bits {
+	u8         state[0x4];
+	u8         log_xrc_srq_size[0x4];
+	u8         reserved_at_8[0x18];
+
+	u8         wq_signature[0x1];
+	u8         cont_srq[0x1];
+	u8         reserved_at_22[0x1];
+	u8         rlky[0x1];
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         log_rq_stride[0x3];
+	u8         xrcd[0x18];
+
+	u8         page_offset[0x6];
+	u8         reserved_at_46[0x2];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         user_index_equal_xrc_srqn[0x1];
+	u8         reserved_at_81[0x1];
+	u8         log_page_size[0x6];
+	u8         user_index[0x18];
+
+	u8         reserved_at_a0[0x20];
+
+	u8         reserved_at_c0[0x8];
+	u8         pd[0x18];
+
+	u8         lwm[0x10];
+	u8         wqe_cnt[0x10];
+
+	u8         reserved_at_100[0x40];
+
+	u8         db_record_addr_h[0x20];
+
+	u8         db_record_addr_l[0x1e];
+	u8         reserved_at_17e[0x2];
+
+	u8         reserved_at_180[0x80];
+};
+
+struct mlx5_ifc_traffic_counter_bits {
+	u8         packets[0x40];
+
+	u8         octets[0x40];
+};
+
+struct mlx5_ifc_tisc_bits {
+	u8         strict_lag_tx_port_affinity[0x1];
+	u8         reserved_at_1[0x3];
+	u8         lag_tx_port_affinity[0x4];
+
+	u8         reserved_at_8[0x4];
+	u8         prio[0x4];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x100];
+
+	u8         reserved_at_120[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_at_140[0x3c0];
+};
+
+enum {
+	MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
+	MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
+};
+
+enum {
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
+	MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
+};
+
+enum {
+	MLX5_RX_HASH_FN_NONE           = 0x0,
+	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
+	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
+};
+
+enum {
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_    = 0x1,
+	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_  = 0x2,
+};
+
+struct mlx5_ifc_tirc_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         disp_type[0x4];
+	u8         reserved_at_24[0x1c];
+
+	u8         reserved_at_40[0x40];
+
+	u8         reserved_at_80[0x4];
+	u8         lro_timeout_period_usecs[0x10];
+	u8         lro_enable_mask[0x4];
+	u8         lro_max_ip_payload_size[0x8];
+
+	u8         reserved_at_a0[0x40];
+
+	u8         reserved_at_e0[0x8];
+	u8         inline_rqn[0x18];
+
+	u8         rx_hash_symmetric[0x1];
+	u8         reserved_at_101[0x1];
+	u8         tunneled_offload_en[0x1];
+	u8         reserved_at_103[0x5];
+	u8         indirect_table[0x18];
+
+	u8         rx_hash_fn[0x4];
+	u8         reserved_at_124[0x2];
+	u8         self_lb_block[0x2];
+	u8         transport_domain[0x18];
+
+	u8         rx_hash_toeplitz_key[10][0x20];
+
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
+
+	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
+
+	u8         reserved_at_2c0[0x4c0];
+};
+
+enum {
+	MLX5_SRQC_STATE_GOOD   = 0x0,
+	MLX5_SRQC_STATE_ERROR  = 0x1,
+};
+
+struct mlx5_ifc_srqc_bits {
+	u8         state[0x4];
+	u8         log_srq_size[0x4];
+	u8         reserved_at_8[0x18];
+
+	u8         wq_signature[0x1];
+	u8         cont_srq[0x1];
+	u8         reserved_at_22[0x1];
+	u8         rlky[0x1];
+	u8         reserved_at_24[0x1];
+	u8         log_rq_stride[0x3];
+	u8         xrcd[0x18];
+
+	u8         page_offset[0x6];
+	u8         reserved_at_46[0x2];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         reserved_at_80[0x2];
+	u8         log_page_size[0x6];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x20];
+
+	u8         reserved_at_c0[0x8];
+	u8         pd[0x18];
+
+	u8         lwm[0x10];
+	u8         wqe_cnt[0x10];
+
+	u8         reserved_at_100[0x40];
+
+	u8         dbr_addr[0x40];
+
+	u8         reserved_at_180[0x80];
+};
+
+enum {
+	MLX5_SQC_STATE_RST  = 0x0,
+	MLX5_SQC_STATE_RDY  = 0x1,
+	MLX5_SQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_sqc_bits {
+	u8         rlky[0x1];
+	u8         cd_master[0x1];
+	u8         fre[0x1];
+	u8         flush_in_error_en[0x1];
+	u8         reserved_at_4[0x1];
+	u8         min_wqe_inline_mode[0x3];
+	u8         state[0x4];
+	u8         reg_umr[0x1];
+	u8         reserved_at_d[0x13];
+
+	u8         reserved_at_20[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x90];
+
+	u8         packet_pacing_rate_limit_index[0x10];
+	u8         tis_lst_sz[0x10];
+	u8         reserved_at_110[0x10];
+
+	u8         reserved_at_120[0x40];
+
+	u8         reserved_at_160[0x8];
+	u8         tis_num_0[0x18];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
+	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
+};
+
+struct mlx5_ifc_scheduling_context_bits {
+	u8         element_type[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         element_attributes[0x20];
+
+	u8         parent_element_id[0x20];
+
+	u8         reserved_at_60[0x40];
+
+	u8         bw_share[0x20];
+
+	u8         max_average_bw[0x20];
+
+	u8         reserved_at_e0[0x120];
+};
+
+struct mlx5_ifc_rqtc_bits {
+	u8         reserved_at_0[0xa0];
+
+	u8         reserved_at_a0[0x10];
+	u8         rqt_max_size[0x10];
+
+	u8         reserved_at_c0[0x10];
+	u8         rqt_actual_size[0x10];
+
+	u8         reserved_at_e0[0x6a0];
+
+	struct mlx5_ifc_rq_num_bits rq_num[0];
+};
+
+enum {
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
+	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
+};
+
+enum {
+	MLX5_RQC_STATE_RST  = 0x0,
+	MLX5_RQC_STATE_RDY  = 0x1,
+	MLX5_RQC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rqc_bits {
+	u8         rlky[0x1];
+	u8         reserved_at_1[0x1];
+	u8         scatter_fcs[0x1];
+	u8         vsd[0x1];
+	u8         mem_rq_type[0x4];
+	u8         state[0x4];
+	u8         reserved_at_c[0x1];
+	u8         flush_in_error_en[0x1];
+	u8         reserved_at_e[0x12];
+
+	u8         reserved_at_20[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         counter_set_id[0x8];
+	u8         reserved_at_68[0x18];
+
+	u8         reserved_at_80[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_at_a0[0xe0];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+enum {
+	MLX5_RMPC_STATE_RDY  = 0x1,
+	MLX5_RMPC_STATE_ERR  = 0x3,
+};
+
+struct mlx5_ifc_rmpc_bits {
+	u8         reserved_at_0[0x8];
+	u8         state[0x4];
+	u8         reserved_at_c[0x14];
+
+	u8         basic_cyclic_rcv_wqe[0x1];
+	u8         reserved_at_21[0x1f];
+
+	u8         reserved_at_40[0x140];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+struct mlx5_ifc_nic_vport_context_bits {
+	u8         reserved_at_0[0x5];
+	u8         min_wqe_inline_mode[0x3];
+	u8         reserved_at_8[0x17];
+	u8         roce_en[0x1];
+
+	u8         arm_change_event[0x1];
+	u8         reserved_at_21[0x1a];
+	u8         event_on_mtu[0x1];
+	u8         event_on_promisc_change[0x1];
+	u8         event_on_vlan_change[0x1];
+	u8         event_on_mc_address_change[0x1];
+	u8         event_on_uc_address_change[0x1];
+
+	u8         reserved_at_40[0xf0];
+
+	u8         mtu[0x10];
+
+	u8         system_image_guid[0x40];
+	u8         port_guid[0x40];
+	u8         node_guid[0x40];
+
+	u8         reserved_at_200[0x140];
+	u8         qkey_violation_counter[0x10];
+	u8         reserved_at_350[0x430];
+
+	u8         promisc_uc[0x1];
+	u8         promisc_mc[0x1];
+	u8         promisc_all[0x1];
+	u8         reserved_at_783[0x2];
+	u8         allowed_list_type[0x3];
+	u8         reserved_at_788[0xc];
+	u8         allowed_list_size[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits permanent_address;
+
+	u8         reserved_at_7e0[0x20];
+
+	u8         current_uc_mac_address[0][0x40];
+};
+
+enum {
+	MLX5_MKC_ACCESS_MODE_PA    = 0x0,
+	MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
+	MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
+	MLX5_MKC_ACCESS_MODE_KSM   = 0x3,
+};
+
+struct mlx5_ifc_mkc_bits {
+	u8         reserved_at_0[0x1];
+	u8         free[0x1];
+	u8         reserved_at_2[0xd];
+	u8         small_fence_on_rdma_read_response[0x1];
+	u8         umr_en[0x1];
+	u8         a[0x1];
+	u8         rw[0x1];
+	u8         rr[0x1];
+	u8         lw[0x1];
+	u8         lr[0x1];
+	u8         access_mode[0x2];
+	u8         reserved_at_18[0x8];
+
+	u8         qpn[0x18];
+	u8         mkey_7_0[0x8];
+
+	u8         reserved_at_40[0x20];
+
+	u8         length64[0x1];
+	u8         bsf_en[0x1];
+	u8         sync_umr[0x1];
+	u8         reserved_at_63[0x2];
+	u8         expected_sigerr_count[0x1];
+	u8         reserved_at_66[0x1];
+	u8         en_rinval[0x1];
+	u8         pd[0x18];
+
+	u8         start_addr[0x40];
+
+	u8         len[0x40];
+
+	u8         bsf_octword_size[0x20];
+
+	u8         reserved_at_120[0x80];
+
+	u8         translations_octword_size[0x20];
+
+	u8         reserved_at_1c0[0x1b];
+	u8         log_page_size[0x5];
+
+	u8         reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_pkey_bits {
+	u8         reserved_at_0[0x10];
+	u8         pkey[0x10];
+};
+
+struct mlx5_ifc_array128_auto_bits {
+	u8         array128_auto[16][0x8];
+};
+
+struct mlx5_ifc_hca_vport_context_bits {
+	u8         field_select[0x20];
+
+	u8         reserved_at_20[0xe0];
+
+	u8         sm_virt_aware[0x1];
+	u8         has_smi[0x1];
+	u8         has_raw[0x1];
+	u8         grh_required[0x1];
+	u8         reserved_at_104[0xc];
+	u8         port_physical_state[0x4];
+	u8         vport_state_policy[0x4];
+	u8         port_state[0x4];
+	u8         vport_state[0x4];
+
+	u8         reserved_at_120[0x20];
+
+	u8         system_image_guid[0x40];
+
+	u8         port_guid[0x40];
+
+	u8         node_guid[0x40];
+
+	u8         cap_mask1[0x20];
+
+	u8         cap_mask1_field_select[0x20];
+
+	u8         cap_mask2[0x20];
+
+	u8         cap_mask2_field_select[0x20];
+
+	u8         reserved_at_280[0x80];
+
+	u8         lid[0x10];
+	u8         reserved_at_310[0x4];
+	u8         init_type_reply[0x4];
+	u8         lmc[0x3];
+	u8         subnet_timeout[0x5];
+
+	u8         sm_lid[0x10];
+	u8         sm_sl[0x4];
+	u8         reserved_at_334[0xc];
+
+	u8         qkey_violation_counter[0x10];
+	u8         pkey_violation_counter[0x10];
+
+	u8         reserved_at_360[0xca0];
+};
+
+struct mlx5_ifc_esw_vport_context_bits {
+	u8         reserved_at_0[0x3];
+	u8         vport_svlan_strip[0x1];
+	u8         vport_cvlan_strip[0x1];
+	u8         vport_svlan_insert[0x1];
+	u8         vport_cvlan_insert[0x2];
+	u8         reserved_at_8[0x18];
+
+	u8         reserved_at_20[0x20];
+
+	u8         svlan_cfi[0x1];
+	u8         svlan_pcp[0x3];
+	u8         svlan_id[0xc];
+	u8         cvlan_cfi[0x1];
+	u8         cvlan_pcp[0x3];
+	u8         cvlan_id[0xc];
+
+	u8         reserved_at_60[0x7a0];
+};
+
+enum {
+	MLX5_EQC_STATUS_OK                = 0x0,
+	MLX5_EQC_STATUS_EQ_WRITE_FAILURE  = 0xa,
+};
+
+enum {
+	MLX5_EQC_ST_ARMED  = 0x9,
+	MLX5_EQC_ST_FIRED  = 0xa,
+};
+
+struct mlx5_ifc_eqc_bits {
+	u8         status[0x4];
+	u8         reserved_at_4[0x9];
+	u8         ec[0x1];
+	u8         oi[0x1];
+	u8         reserved_at_f[0x5];
+	u8         st[0x4];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x14];
+	u8         page_offset[0x6];
+	u8         reserved_at_5a[0x6];
+
+	u8         reserved_at_60[0x3];
+	u8         log_eq_size[0x5];
+	u8         uar_page[0x18];
+
+	u8         reserved_at_80[0x20];
+
+	u8         reserved_at_a0[0x18];
+	u8         intr[0x8];
+
+	u8         reserved_at_c0[0x3];
+	u8         log_page_size[0x5];
+	u8         reserved_at_c8[0x18];
+
+	u8         reserved_at_e0[0x60];
+
+	u8         reserved_at_140[0x8];
+	u8         consumer_counter[0x18];
+
+	u8         reserved_at_160[0x8];
+	u8         producer_counter[0x18];
+
+	u8         reserved_at_180[0x80];
+};
+
+enum {
+	MLX5_DCTC_STATE_ACTIVE    = 0x0,
+	MLX5_DCTC_STATE_DRAINING  = 0x1,
+	MLX5_DCTC_STATE_DRAINED   = 0x2,
+};
+
+enum {
+	MLX5_DCTC_CS_RES_DISABLE    = 0x0,
+	MLX5_DCTC_CS_RES_NA         = 0x1,
+	MLX5_DCTC_CS_RES_UP_TO_64B  = 0x2,
+};
+
+enum {
+	MLX5_DCTC_MTU_256_BYTES  = 0x1,
+	MLX5_DCTC_MTU_512_BYTES  = 0x2,
+	MLX5_DCTC_MTU_1K_BYTES   = 0x3,
+	MLX5_DCTC_MTU_2K_BYTES   = 0x4,
+	MLX5_DCTC_MTU_4K_BYTES   = 0x5,
+};
+
+struct mlx5_ifc_dctc_bits {
+	u8         reserved_at_0[0x4];
+	u8         state[0x4];
+	u8         reserved_at_8[0x18];
+
+	u8         reserved_at_20[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         counter_set_id[0x8];
+	u8         atomic_mode[0x4];
+	u8         rre[0x1];
+	u8         rwe[0x1];
+	u8         rae[0x1];
+	u8         atomic_like_write_en[0x1];
+	u8         latency_sensitive[0x1];
+	u8         rlky[0x1];
+	u8         free_ar[0x1];
+	u8         reserved_at_73[0xd];
+
+	u8         reserved_at_80[0x8];
+	u8         cs_res[0x8];
+	u8         reserved_at_90[0x3];
+	u8         min_rnr_nak[0x5];
+	u8         reserved_at_98[0x8];
+
+	u8         reserved_at_a0[0x8];
+	u8         srqn_xrqn[0x18];
+
+	u8         reserved_at_c0[0x8];
+	u8         pd[0x18];
+
+	u8         tclass[0x8];
+	u8         reserved_at_e8[0x4];
+	u8         flow_label[0x14];
+
+	u8         dc_access_key[0x40];
+
+	u8         reserved_at_140[0x5];
+	u8         mtu[0x3];
+	u8         port[0x8];
+	u8         pkey_index[0x10];
+
+	u8         reserved_at_160[0x8];
+	u8         my_addr_index[0x8];
+	u8         reserved_at_170[0x8];
+	u8         hop_limit[0x8];
+
+	u8         dc_access_key_violation_count[0x20];
+
+	u8         reserved_at_1a0[0x14];
+	u8         dei_cfi[0x1];
+	u8         eth_prio[0x3];
+	u8         ecn[0x2];
+	u8         dscp[0x6];
+
+	u8         reserved_at_1c0[0x40];
+};
+
+enum {
+	MLX5_CQC_STATUS_OK             = 0x0,
+	MLX5_CQC_STATUS_CQ_OVERFLOW    = 0x9,
+	MLX5_CQC_STATUS_CQ_WRITE_FAIL  = 0xa,
+};
+
+enum {
+	MLX5_CQC_CQE_SZ_64_BYTES   = 0x0,
+	MLX5_CQC_CQE_SZ_128_BYTES  = 0x1,
+};
+
+enum {
+	MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED  = 0x6,
+	MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED            = 0x9,
+	MLX5_CQC_ST_FIRED                                 = 0xa,
+};
+
+enum {
+	MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
+	MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
+	MLX5_CQ_PERIOD_NUM_MODES
+};
+
+struct mlx5_ifc_cqc_bits {
+	u8         status[0x4];
+	u8         reserved_at_4[0x4];
+	u8         cqe_sz[0x3];
+	u8         cc[0x1];
+	u8         reserved_at_c[0x1];
+	u8         scqe_break_moderation_en[0x1];
+	u8         oi[0x1];
+	u8         cq_period_mode[0x2];
+	u8         cqe_comp_en[0x1];
+	u8         mini_cqe_res_format[0x2];
+	u8         st[0x4];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x14];
+	u8         page_offset[0x6];
+	u8         reserved_at_5a[0x6];
+
+	u8         reserved_at_60[0x3];
+	u8         log_cq_size[0x5];
+	u8         uar_page[0x18];
+
+	u8         reserved_at_80[0x4];
+	u8         cq_period[0xc];
+	u8         cq_max_count[0x10];
+
+	u8         reserved_at_a0[0x18];
+	u8         c_eqn[0x8];
+
+	u8         reserved_at_c0[0x3];
+	u8         log_page_size[0x5];
+	u8         reserved_at_c8[0x18];
+
+	u8         reserved_at_e0[0x20];
+
+	u8         reserved_at_100[0x8];
+	u8         last_notified_index[0x18];
+
+	u8         reserved_at_120[0x8];
+	u8         last_solicit_index[0x18];
+
+	u8         reserved_at_140[0x8];
+	u8         consumer_counter[0x18];
+
+	u8         reserved_at_160[0x8];
+	u8         producer_counter[0x18];
+
+	u8         reserved_at_180[0x40];
+
+	u8         dbr_addr[0x40];
+};
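+
+/*
+ * cq_period/cq_max_count implement event moderation: a completion
+ * event fires after cq_period usecs or cq_max_count CQEs, whichever
+ * comes first, and cq_period_mode chooses whether the timer restarts
+ * from the last EQE or the last CQE.  A sketch, with `cqc` taken from
+ * a create/modify CQ mailbox:
+ *
+ *	MLX5_SET(cqc, cqc, cq_period_mode,
+ *		 MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ *	MLX5_SET(cqc, cqc, cq_period, 8);
+ *	MLX5_SET(cqc, cqc, cq_max_count, 64);
+ */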
+
+union mlx5_ifc_cong_control_roce_ecn_auto_bits {
+	struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
+	struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
+	struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
+	u8         reserved_at_0[0x800];
+};
+
+struct mlx5_ifc_query_adapter_param_block_bits {
+	u8         reserved_at_0[0xc0];
+
+	u8         reserved_at_c0[0x8];
+	u8         ieee_vendor_id[0x18];
+
+	u8         reserved_at_e0[0x10];
+	u8         vsd_vendor_id[0x10];
+
+	u8         vsd[208][0x8];
+
+	u8         vsd_contd_psid[16][0x8];
+};
+
+enum {
+	MLX5_XRQC_STATE_GOOD   = 0x0,
+	MLX5_XRQC_STATE_ERROR  = 0x1,
+};
+
+enum {
+	MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0,
+	MLX5_XRQC_TOPOLOGY_TAG_MATCHING        = 0x1,
+};
+
+enum {
+	MLX5_XRQC_OFFLOAD_RNDV = 0x1,
+};
+
+struct mlx5_ifc_tag_matching_topology_context_bits {
+	u8         log_matching_list_sz[0x4];
+	u8         reserved_at_4[0xc];
+	u8         append_next_index[0x10];
+
+	u8         sw_phase_cnt[0x10];
+	u8         hw_phase_cnt[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_xrqc_bits {
+	u8         state[0x4];
+	u8         rlkey[0x1];
+	u8         reserved_at_5[0xf];
+	u8         topology[0x4];
+	u8         reserved_at_18[0x4];
+	u8         offload[0x4];
+
+	u8         reserved_at_20[0x8];
+	u8         user_index[0x18];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0xa0];
+
+	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
+
+	u8         reserved_at_180[0x880];
+
+	struct mlx5_ifc_wq_bits wq;
+};
+
+union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
+	struct mlx5_ifc_modify_field_select_bits modify_field_select;
+	struct mlx5_ifc_resize_field_select_bits resize_field_select;
+	u8         reserved_at_0[0x20];
+};
+
+union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
+	struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
+	struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
+	struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
+	u8         reserved_at_0[0x20];
+};
+
+union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
+	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
+	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+	u8         reserved_at_0[0x7c0];
+};
+
+union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
+	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
+	struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
+	u8         reserved_at_0[0x7c0];
+};
+
+union mlx5_ifc_event_auto_bits {
+	struct mlx5_ifc_comp_event_bits comp_event;
+	struct mlx5_ifc_dct_events_bits dct_events;
+	struct mlx5_ifc_qp_events_bits qp_events;
+	struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
+	struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
+	struct mlx5_ifc_cq_error_bits cq_error;
+	struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
+	struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
+	struct mlx5_ifc_gpio_event_bits gpio_event;
+	struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
+	struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
+	struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
+	u8         reserved_at_0[0xe0];
+};
+
+struct mlx5_ifc_health_buffer_bits {
+	u8         reserved_at_0[0x100];
+
+	u8         assert_existptr[0x20];
+
+	u8         assert_callra[0x20];
+
+	u8         reserved_at_140[0x40];
+
+	u8         fw_version[0x20];
+
+	u8         hw_id[0x20];
+
+	u8         reserved_at_1c0[0x20];
+
+	u8         irisc_index[0x8];
+	u8         synd[0x8];
+	u8         ext_synd[0x10];
+};
+
+struct mlx5_ifc_register_loopback_control_bits {
+	u8         no_lb[0x1];
+	u8         reserved_at_1[0x7];
+	u8         port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_vport_tc_element_bits {
+	u8         traffic_class[0x4];
+	u8         reserved_at_4[0xc];
+	u8         vport_number[0x10];
+};
+
+struct mlx5_ifc_vport_element_bits {
+	u8         reserved_at_0[0x10];
+	u8         vport_number[0x10];
+};
+
+enum {
+	TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
+	TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
+	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
+};
+
+struct mlx5_ifc_tsar_element_bits {
+	u8         reserved_at_0[0x8];
+	u8         tsar_type[0x8];
+	u8         reserved_at_10[0x10];
+};
+
+struct mlx5_ifc_teardown_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
+	MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE     = 0x1,
+};
+
+struct mlx5_ifc_teardown_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         profile[0x10];
+
+	u8         reserved_at_60[0x20];
+};
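+
+/*
+ * Every *_in/*_out pair in this file is exchanged through the same
+ * mailbox convention; teardown, as a sketch:
+ *
+ *	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
+ *	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};
+ *	int err;
+ *
+ *	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
+ *	MLX5_SET(teardown_hca_in, in, profile,
+ *		 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE);
+ *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ */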
+
+struct mlx5_ifc_sqerr2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sqerr2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_sqd2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_sqd2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_set_roce_address_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_roce_address_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         roce_address_index[0x10];
+	u8         reserved_at_50[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_set_mad_demux_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL   = 0x0,
+	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE  = 0x2,
+};
+
+struct mlx5_ifc_set_mad_demux_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x6];
+	u8         demux_mode[0x2];
+	u8         reserved_at_68[0x18];
+};
+
+struct mlx5_ifc_set_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x60];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_at_c0[0x20];
+
+	u8         reserved_at_e0[0x13];
+	u8         vlan_valid[0x1];
+	u8         vlan[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+	u8         reserved_at_140[0xc0];
+};
+
+struct mlx5_ifc_set_issi_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_issi_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         current_issi[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_set_hca_cap_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_hca_cap_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+enum {
+	MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION            = 0x0,
+	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG          = 0x1,
+	MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST  = 0x2,
+	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS     = 0x3,
+};
+
+struct mlx5_ifc_set_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x18];
+	u8         modify_enable_mask[0x8];
+
+	u8         reserved_at_e0[0x20];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_at_120[0xe0];
+
+	struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_rts2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rts2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_rtr2rts_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rtr2rts_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_rst2init_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rst2init_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_query_xrq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_xrqc_bits xrq_context;
+};
+
+struct mlx5_ifc_query_xrq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+enum {
+	MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN  = 0x0,
+	MLX5_QUERY_VPORT_STATE_OUT_STATE_UP    = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x18];
+	u8         admin_state[0x4];
+	u8         state[0x4];
+};
+
+enum {
+	MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT  = 0x0,
+	MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT   = 0x1,
+};
+
+struct mlx5_ifc_query_vport_state_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_vport_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_traffic_counter_bits received_errors;
+
+	struct mlx5_ifc_traffic_counter_bits transmit_errors;
+
+	struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
+
+	struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
+
+	struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
+
+	u8         reserved_at_680[0xa00];
+};
+
+enum {
+	MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS  = 0x0,
+};
+
+struct mlx5_ifc_query_vport_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x60];
+
+	u8         clear[0x1];
+	u8         reserved_at_c1[0x1f];
+
+	u8         reserved_at_e0[0x20];
+};
+
+struct mlx5_ifc_query_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_tisc_bits tis_context;
+};
+
+struct mlx5_ifc_query_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_tirc_bits tir_context;
+};
+
+struct mlx5_ifc_query_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_srqc_bits srq_context_entry;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_sqc_bits sq_context;
+};
+
+struct mlx5_ifc_query_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_special_contexts_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         dump_fill_mkey[0x20];
+
+	u8         resd_lkey[0x20];
+
+	u8         null_mkey[0x20];
+
+	u8         reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_query_special_contexts_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
+enum {
+	SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
+};
+
+struct mlx5_ifc_query_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_query_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_query_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rqc_bits rq_context;
+};
+
+struct mlx5_ifc_query_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_roce_address_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_roce_addr_layout_bits roce_address;
+};
+
+struct mlx5_ifc_query_roce_address_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         roce_address_index[0x10];
+	u8         reserved_at_50[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rmpc_bits rmp_context;
+};
+
+struct mlx5_ifc_query_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         rx_write_requests[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	u8         rx_read_requests[0x20];
+
+	u8         reserved_at_e0[0x20];
+
+	u8         rx_atomic_requests[0x20];
+
+	u8         reserved_at_120[0x20];
+
+	u8         rx_dct_connect[0x20];
+
+	u8         reserved_at_160[0x20];
+
+	u8         out_of_buffer[0x20];
+
+	u8         reserved_at_1a0[0x20];
+
+	u8         out_of_sequence[0x20];
+
+	u8         reserved_at_1e0[0x20];
+
+	u8         duplicate_request[0x20];
+
+	u8         reserved_at_220[0x20];
+
+	u8         rnr_nak_retry_err[0x20];
+
+	u8         reserved_at_260[0x20];
+
+	u8         packet_seq_err[0x20];
+
+	u8         reserved_at_2a0[0x20];
+
+	u8         implied_nak_seq_err[0x20];
+
+	u8         reserved_at_2e0[0x20];
+
+	u8         local_ack_timeout_err[0x20];
+
+	u8         reserved_at_320[0x4e0];
+};
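+
+/*
+ * Sketch of pulling a single Q counter out of the reply above, where
+ * `out` is the query_q_counter_out mailbox (set `clear` in the input
+ * to reset counters on read):
+ *
+ *	u32 oob = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+ */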
+
+struct mlx5_ifc_query_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x80];
+
+	u8         clear[0x1];
+	u8         reserved_at_c1[0x1f];
+
+	u8         reserved_at_e0[0x18];
+	u8         counter_set_id[0x8];
+};
+
+struct mlx5_ifc_query_pages_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         num_pages[0x20];
+};
+
+enum {
+	MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES     = 0x1,
+	MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES     = 0x2,
+	MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES  = 0x3,
+};
+
+struct mlx5_ifc_query_pages_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x5];
+	u8         allowed_list_type[0x3];
+	u8         reserved_at_68[0x18];
+};
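+
+/*
+ * Nested paths work with the accessor macros because these layouts
+ * encode bit offsets directly; a sketch of fetching the permanent MAC
+ * out of a query reply, in the style of the vport helpers:
+ *
+ *	u8 *mac = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ *			       nic_vport_context.permanent_address);
+ */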
+
+struct mlx5_ifc_query_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+	u8         reserved_at_280[0x600];
+
+	u8         bsf0_klm0_pas_mtt0_1[16][0x8];
+
+	u8         bsf1_klm1_pas_mtt2_3[16][0x8];
+};
+
+struct mlx5_ifc_query_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         mkey_index[0x18];
+
+	u8         pg_access[0x1];
+	u8         reserved_at_61[0x1f];
+};
+
+struct mlx5_ifc_query_mad_demux_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         mad_dumux_parameters_block[0x20];
+};
+
+struct mlx5_ifc_query_mad_demux_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xa0];
+
+	u8         reserved_at_e0[0x13];
+	u8         vlan_valid[0x1];
+	u8         vlan[0xc];
+
+	struct mlx5_ifc_mac_address_layout_bits mac_address;
+
+	u8         reserved_at_140[0xc0];
+};
+
+struct mlx5_ifc_query_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x60];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_query_issi_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         current_issi[0x10];
+
+	u8         reserved_at_60[0xa0];
+
+	u8         reserved_at_100[76][0x8];
+	u8         supported_issi_dw0[0x20];
+};
+
+struct mlx5_ifc_query_issi_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_driver_version_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_driver_version_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         driver_version[64][0x8];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_pkey_bits pkey[0];
+};
+
+struct mlx5_ifc_query_hca_vport_pkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x10];
+	u8         pkey_index[0x10];
+};
+
+enum {
+	MLX5_HCA_VPORT_SEL_PORT_GUID	= 1 << 0,
+	MLX5_HCA_VPORT_SEL_NODE_GUID	= 1 << 1,
+	MLX5_HCA_VPORT_SEL_STATE_POLICY	= 1 << 2,
+};
+
+struct mlx5_ifc_query_hca_vport_gid_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x20];
+
+	u8         gids_num[0x10];
+	u8         reserved_at_70[0x10];
+
+	struct mlx5_ifc_array128_auto_bits gid[0];
+};
+
+struct mlx5_ifc_query_hca_vport_gid_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x10];
+	u8         gid_index[0x10];
+};
+
+struct mlx5_ifc_query_hca_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_query_hca_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_hca_cap_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	union mlx5_ifc_hca_cap_union_bits capability;
+};
+
+struct mlx5_ifc_query_hca_cap_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_query_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x80];
+
+	u8         reserved_at_c0[0x8];
+	u8         level[0x8];
+	u8         reserved_at_d0[0x8];
+	u8         log_size[0x8];
+
+	u8         reserved_at_e0[0x120];
+};
+
+struct mlx5_ifc_query_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_query_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+
+	struct mlx5_ifc_flow_context_bits flow_context;
+};
+
+struct mlx5_ifc_query_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x40];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_at_120[0xe0];
+};
+
+enum {
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_query_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xa0];
+
+	u8         start_flow_index[0x20];
+
+	u8         reserved_at_100[0x20];
+
+	u8         end_flow_index[0x20];
+
+	u8         reserved_at_140[0xa0];
+
+	u8         reserved_at_1e0[0x18];
+	u8         match_criteria_enable[0x8];
+
+	struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+	u8         reserved_at_1200[0xe00];
+};
+
+struct mlx5_ifc_query_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         group_id[0x20];
+
+	u8         reserved_at_e0[0x120];
+};
+
+struct mlx5_ifc_query_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
+};
+
+struct mlx5_ifc_query_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x80];
+
+	u8         clear[0x1];
+	u8         reserved_at_c1[0xf];
+	u8         num_of_counters[0x10];
+
+	u8         reserved_at_e0[0x10];
+	u8         flow_counter_id[0x10];
+};
+
+struct mlx5_ifc_query_esw_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
+
+struct mlx5_ifc_query_esw_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_esw_vport_context_fields_select_bits {
+	u8         reserved_at_0[0x1c];
+	u8         vport_cvlan_insert[0x1];
+	u8         vport_svlan_insert[0x1];
+	u8         vport_cvlan_strip[0x1];
+	u8         vport_svlan_strip[0x1];
+};
+
+struct mlx5_ifc_modify_esw_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
+
+	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
+};
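+
+/*
+ * field_select is the usual mlx5 MODIFY idiom: only those fields of the
+ * embedded context whose select bit is set are applied by the device;
+ * everything else in the mailbox is ignored.
+ */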
+
+struct mlx5_ifc_query_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_eqc_bits eq_context_entry;
+
+	u8         reserved_at_280[0x40];
+
+	u8         event_bitmask[0x40];
+
+	u8         reserved_at_300[0x580];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_encap_header_in_bits {
+	u8         reserved_at_0[0x5];
+	u8         header_type[0x3];
+	u8         reserved_at_8[0xe];
+	u8         encap_header_size[0xa];
+
+	u8         reserved_at_20[0x10];
+	u8         encap_header[2][0x8];
+
+	u8         more_encap_header[0][0x8];
+};
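+
+/*
+ * An encap header is an opaque packet-header blob: header_type tells the
+ * device how to interpret it, encap_header_size is its length in bytes,
+ * and the raw bytes follow in encap_header[]/more_encap_header[].
+ * ALLOC_ENCAP_HEADER below registers such a blob with the device and
+ * returns an encap_id that flow table entries can then reference for
+ * encapsulation.
+ */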
+
+struct mlx5_ifc_query_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0xa0];
+
+	struct mlx5_ifc_encap_header_in_bits encap_header[0];
+};
+
+struct mlx5_ifc_query_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_60[0xa0];
+};
+
+struct mlx5_ifc_alloc_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xa0];
+
+	struct mlx5_ifc_encap_header_in_bits encap_header;
+};
+
+struct mlx5_ifc_dealloc_encap_header_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_encap_header_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         encap_id[0x20];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_dctc_bits dct_context_entry;
+
+	u8         reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_query_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_query_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_cong_status_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x20];
+
+	u8         enable[0x1];
+	u8         tag_enable[0x1];
+	u8         reserved_at_62[0x1e];
+};
+
+struct mlx5_ifc_query_cong_status_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         priority[0x4];
+	u8         cong_protocol[0x4];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_cong_statistics_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         cur_flows[0x20];
+
+	u8         sum_flows[0x20];
+
+	u8         cnp_ignored_high[0x20];
+
+	u8         cnp_ignored_low[0x20];
+
+	u8         cnp_handled_high[0x20];
+
+	u8         cnp_handled_low[0x20];
+
+	u8         reserved_at_140[0x100];
+
+	u8         time_stamp_high[0x20];
+
+	u8         time_stamp_low[0x20];
+
+	u8         accumulators_period[0x20];
+
+	u8         ecn_marked_roce_packets_high[0x20];
+
+	u8         ecn_marked_roce_packets_low[0x20];
+
+	u8         cnps_sent_high[0x20];
+
+	u8         cnps_sent_low[0x20];
+
+	u8         reserved_at_320[0x560];
+};
+
+struct mlx5_ifc_query_cong_statistics_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         clear[0x1];
+	u8         reserved_at_41[0x1f];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_cong_params_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_query_cong_params_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x1c];
+	u8         cong_protocol[0x4];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_adapter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
+};
+
+struct mlx5_ifc_query_adapter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_qp_2rst_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_qp_2err_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_qp_2err_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_page_fault_resume_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_page_fault_resume_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         error[0x1];
+	u8         reserved_at_41[0x4];
+	u8         page_fault_type[0x3];
+	u8         wq_number[0x18];
+
+	u8         reserved_at_60[0x8];
+	u8         token[0x18];
+};
+
+struct mlx5_ifc_nop_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_nop_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_vport_state_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x18];
+	u8         admin_state[0x4];
+	u8         reserved_at_7c[0x4];
+};
+
+struct mlx5_ifc_modify_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_tis_bitmask_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         reserved_at_20[0x1d];
+	u8         lag_tx_port_affinity[0x1];
+	u8         strict_lag_tx_port_affinity[0x1];
+	u8         prio[0x1];
+};
+
+struct mlx5_ifc_modify_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_modify_tir_bitmask_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         reserved_at_20[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_at_3c[0x1];
+	u8         hash[0x1];
+	u8         reserved_at_3e[0x1];
+	u8         lro[0x1];
+};
+
+struct mlx5_ifc_modify_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_modify_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         sq_state[0x4];
+	u8         reserved_at_44[0x4];
+	u8         sqn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+enum {
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
+	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
+};
+
+struct mlx5_ifc_modify_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x20];
+
+	u8         modify_bitmask[0x20];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
+struct mlx5_ifc_modify_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rqt_bitmask_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         reserved_at_20[0x1f];
+	u8         rqn_list[0x1];
+};
+
+struct mlx5_ifc_modify_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_rqt_bitmask_bits bitmask;
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_rqtc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
+};
+
+struct mlx5_ifc_modify_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         rq_state[0x4];
+	u8         reserved_at_44[0x4];
+	u8         rqn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         modify_bitmask[0x40];
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_modify_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_rmp_bitmask_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         reserved_at_20[0x1f];
+	u8         lwm[0x1];
+};
+
+struct mlx5_ifc_modify_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         rmp_state[0x4];
+	u8         reserved_at_44[0x4];
+	u8         rmpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_rmp_bitmask_bits bitmask;
+
+	u8         reserved_at_c0[0x40];
+
+	struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_modify_nic_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_nic_vport_field_select_bits {
+	u8         reserved_at_0[0x16];
+	u8         node_guid[0x1];
+	u8         port_guid[0x1];
+	u8         min_inline[0x1];
+	u8         mtu[0x1];
+	u8         change_event[0x1];
+	u8         promisc[0x1];
+	u8         permanent_address[0x1];
+	u8         addresses_list[0x1];
+	u8         roce_en[0x1];
+	u8         reserved_at_1f[0x1];
+};
+
+struct mlx5_ifc_modify_nic_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
+
+	u8         reserved_at_80[0x780];
+
+	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_modify_hca_vport_context_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_hca_vport_context_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xb];
+	u8         port_num[0x4];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
+};
+
+struct mlx5_ifc_modify_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ  = 0x0,
+	MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ  = 0x1,
+};
+
+struct mlx5_ifc_modify_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_cong_status_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         priority[0x4];
+	u8         cong_protocol[0x4];
+
+	u8         enable[0x1];
+	u8         tag_enable[0x1];
+	u8         reserved_at_62[0x1e];
+};
+
+struct mlx5_ifc_modify_cong_params_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_cong_params_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x1c];
+	u8         cong_protocol[0x4];
+
+	union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
+
+	u8         reserved_at_80[0x80];
+
+	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
+};
+
+struct mlx5_ifc_manage_pages_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         output_num_entries[0x20];
+
+	u8         reserved_at_60[0x20];
+
+	u8         pas[0][0x40];
+};
+
+enum {
+	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL     = 0x0,
+	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS  = 0x1,
+	MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES    = 0x2,
+};
+
+struct mlx5_ifc_manage_pages_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         input_num_entries[0x20];
+
+	u8         pas[0][0x40];
+};
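+
+/*
+ * For MANAGE_PAGES, op_mod (see the enum above) selects the direction:
+ * the driver either hands memory pages to the device, passing their
+ * 64-bit physical addresses in pas[] with input_num_entries set, or
+ * reclaims pages, in which case the device returns the addresses in the
+ * output pas[] and reports the count in output_num_entries.
+ */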
+
+struct mlx5_ifc_mad_ifc_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         response_mad_packet[256][0x8];
+};
+
+struct mlx5_ifc_mad_ifc_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         remote_lid[0x10];
+	u8         reserved_at_50[0x8];
+	u8         port[0x8];
+
+	u8         reserved_at_60[0x20];
+
+	u8         mad[256][0x8];
+};
+
+struct mlx5_ifc_init_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init2rtr_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
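+
+/*
+ * The QP state-transition commands (INIT2RTR above, RTR2RTS, etc.) share
+ * this shape: qpn selects the QP, opt_param_mask flags which optional
+ * fields of the embedded qpc are being supplied for the transition, and
+ * the qpc itself carries the new context values.
+ */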
+
+struct mlx5_ifc_init2init_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_init2init_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         packet_headers_log[128][0x8];
+
+	u8         packet_syndrome[64][0x8];
+};
+
+struct mlx5_ifc_get_dropped_packet_log_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_gen_eqe_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_at_60[0x20];
+
+	u8         eqe[64][0x8];
+};
+
+struct mlx5_ifc_gen_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_enable_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_enable_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_drain_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_drain_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_disable_hca_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x20];
+};
+
+struct mlx5_ifc_disable_hca_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         function_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_detach_from_mcg_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_detach_from_mcg_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_destroy_xrq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_xrq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_destroy_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_80[0x180];
+};
+
+struct mlx5_ifc_destroy_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_psv_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_psv_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         psvn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         mkey_index[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_destroy_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         group_id[0x20];
+
+	u8         reserved_at_e0[0x120];
+};
+
+struct mlx5_ifc_destroy_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_destroy_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x10];
+	u8         vxlan_udp_port[0x10];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_delete_l2_table_entry_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x60];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_index[0x18];
+
+	u8         reserved_at_c0[0x140];
+};
+
+struct mlx5_ifc_delete_fte_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_delete_fte_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x40];
+
+	u8         flow_index[0x20];
+
+	u8         reserved_at_120[0xe0];
+};
+
+struct mlx5_ifc_dealloc_xrcd_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_xrcd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_uar_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_uar_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         uar[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_transport_domain_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x18];
+	u8         counter_set_id[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_pd_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_pd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_xrq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         xrqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_xrq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_xrqc_bits xrq_context;
+};
+
+struct mlx5_ifc_create_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_tis_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         tisn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tis_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_tisc_bits ctx;
+};
+
+struct mlx5_ifc_create_tir_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         tirn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_tir_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_tirc_bits ctx;
+};
+
+struct mlx5_ifc_create_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         srqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_srqc_bits srq_context_entry;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_sq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         sqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_sq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_sqc_bits ctx;
+};
+
+struct mlx5_ifc_create_scheduling_element_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         scheduling_element_id[0x20];
+
+	u8         reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_create_scheduling_element_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         scheduling_hierarchy[0x8];
+	u8         reserved_at_48[0x18];
+
+	u8         reserved_at_60[0xa0];
+
+	struct mlx5_ifc_scheduling_context_bits scheduling_context;
+
+	u8         reserved_at_300[0x100];
+};
+
+struct mlx5_ifc_create_rqt_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         rqtn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rqt_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rqtc_bits rqt_context;
+};
+
+struct mlx5_ifc_create_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         rqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rqc_bits ctx;
+};
+
+struct mlx5_ifc_create_rmp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         rmpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_rmp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0xc0];
+
+	struct mlx5_ifc_rmpc_bits ctx;
+};
+
+struct mlx5_ifc_create_qp_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_qp_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         opt_param_mask[0x20];
+
+	u8         reserved_at_a0[0x20];
+
+	struct mlx5_ifc_qpc_bits qpc;
+
+	u8         reserved_at_800[0x80];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_psv_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         reserved_at_80[0x8];
+	u8         psv0_index[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         psv1_index[0x18];
+
+	u8         reserved_at_c0[0x8];
+	u8         psv2_index[0x18];
+
+	u8         reserved_at_e0[0x8];
+	u8         psv3_index[0x18];
+};
+
+struct mlx5_ifc_create_psv_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         num_psv[0x4];
+	u8         reserved_at_44[0x4];
+	u8         pd[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_mkey_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         mkey_index[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_mkey_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+
+	u8         pg_access[0x1];
+	u8         reserved_at_61[0x1f];
+
+	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
+
+	u8         reserved_at_280[0x80];
+
+	u8         translations_octword_actual_size[0x20];
+
+	u8         reserved_at_320[0x560];
+
+	u8         klm_pas_mtt[0][0x20];
+};
+
+struct mlx5_ifc_create_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x20];
+
+	u8         encap_en[0x1];
+	u8         decap_en[0x1];
+	u8         reserved_at_c2[0x2];
+	u8         table_miss_mode[0x4];
+	u8         level[0x8];
+	u8         reserved_at_d0[0x8];
+	u8         log_size[0x8];
+
+	u8         reserved_at_e0[0x8];
+	u8         table_miss_id[0x18];
+
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
+};
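+
+/*
+ * level and log_size place the new flow table in the steering hierarchy
+ * and size it (2^log_size entries); table_miss_mode selects what happens
+ * to packets that match no flow group: 0 takes the table-type default
+ * action, 1 forwards them to the table named by table_miss_id.
+ */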
+
+struct mlx5_ifc_create_flow_group_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         group_id[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+enum {
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
+	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
+};
+
+struct mlx5_ifc_create_flow_group_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x20];
+
+	u8         start_flow_index[0x20];
+
+	u8         reserved_at_100[0x20];
+
+	u8         end_flow_index[0x20];
+
+	u8         reserved_at_140[0xa0];
+
+	u8         reserved_at_1e0[0x18];
+	u8         match_criteria_enable[0x8];
+
+	struct mlx5_ifc_fte_match_param_bits match_criteria;
+
+	u8         reserved_at_1200[0xe00];
+};
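+
+/*
+ * A flow group reserves [start_flow_index, end_flow_index] within a flow
+ * table and fixes the match mask for that range: match_criteria_enable
+ * says which sections of match_criteria (outer headers, misc parameters,
+ * inner headers, per the enum above) participate, and every flow table
+ * entry later written into the range matches on exactly those fields.
+ */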
+
+struct mlx5_ifc_create_eq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x18];
+	u8         eq_number[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_eq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_eqc_bits eq_context_entry;
+
+	u8         reserved_at_280[0x40];
+
+	u8         event_bitmask[0x40];
+
+	u8         reserved_at_300[0x580];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_create_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         dctn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_dctc_bits dct_context_entry;
+
+	u8         reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_create_cq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         cqn[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_create_cq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_cqc_bits cq_context;
+
+	u8         reserved_at_280[0x600];
+
+	u8         pas[0][0x40];
+};
+
+struct mlx5_ifc_config_int_moderation_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x4];
+	u8         min_delay[0xc];
+	u8         int_vector[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+enum {
+	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE  = 0x0,
+	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_config_int_moderation_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x4];
+	u8         min_delay[0xc];
+	u8         int_vector[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_attach_to_mcg_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_attach_to_mcg_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         qpn[0x18];
+
+	u8         reserved_at_60[0x20];
+
+	u8         multicast_gid[16][0x8];
+};
+
+struct mlx5_ifc_arm_xrq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_arm_xrq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrqn[0x18];
+
+	u8         reserved_at_60[0x10];
+	u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_xrc_srq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ  = 0x1,
+};
+
+struct mlx5_ifc_arm_xrc_srq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         xrc_srqn[0x18];
+
+	u8         reserved_at_60[0x10];
+	u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_rq_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+enum {
+	MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1,
+	MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2,
+};
+
+struct mlx5_ifc_arm_rq_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         srq_number[0x18];
+
+	u8         reserved_at_60[0x10];
+	u8         lwm[0x10];
+};
+
+struct mlx5_ifc_arm_dct_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_arm_dct_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x8];
+	u8         dct_number[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         xrcd[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_xrcd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_uar_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         uar[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_uar_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_transport_domain_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         transport_domain[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_transport_domain_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_q_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x18];
+	u8         counter_set_id[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_q_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_pd_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x8];
+	u8         pd[0x18];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_pd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_alloc_flow_counter_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         flow_counter_id[0x10];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_flow_counter_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+
+	u8         reserved_at_60[0x10];
+	u8         vxlan_udp_port[0x10];
+};
+
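+/*
+ * SET_RATE_LIMIT programs an entry in the hardware rate-limit table:
+ * rate_limit_index selects the entry and rate_limit the rate to
+ * associate with it.
+ */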
+struct mlx5_ifc_set_rate_limit_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_rate_limit_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         rate_limit_index[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         rate_limit[0x20];
+};
+
+struct mlx5_ifc_access_register_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	u8         register_data[0][0x20];
+};
+
+enum {
+	MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
+	MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
+};
+
+struct mlx5_ifc_access_register_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         register_id[0x10];
+
+	u8         argument[0x20];
+
+	u8         register_data[0][0x20];
+};
+
+struct mlx5_ifc_sltp_reg_bits {
+	u8         status[0x4];
+	u8         version[0x4];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_at_12[0x2];
+	u8         lane[0x4];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x7];
+	u8         polarity[0x1];
+	u8         ob_tap0[0x8];
+	u8         ob_tap1[0x8];
+	u8         ob_tap2[0x8];
+
+	u8         reserved_at_60[0xc];
+	u8         ob_preemp_mode[0x4];
+	u8         ob_reg[0x8];
+	u8         ob_bias[0x8];
+
+	u8         reserved_at_80[0x20];
+};
+
+struct mlx5_ifc_slrg_reg_bits {
+	u8         status[0x4];
+	u8         version[0x4];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_at_12[0x2];
+	u8         lane[0x4];
+	u8         reserved_at_18[0x8];
+
+	u8         time_to_link_up[0x10];
+	u8         reserved_at_30[0xc];
+	u8         grade_lane_speed[0x4];
+
+	u8         grade_version[0x8];
+	u8         grade[0x18];
+
+	u8         reserved_at_60[0x4];
+	u8         height_grade_type[0x4];
+	u8         height_grade[0x18];
+
+	u8         height_dz[0x10];
+	u8         height_dv[0x10];
+
+	u8         reserved_at_a0[0x10];
+	u8         height_sigma[0x10];
+
+	u8         reserved_at_c0[0x20];
+
+	u8         reserved_at_e0[0x4];
+	u8         phase_grade_type[0x4];
+	u8         phase_grade[0x18];
+
+	u8         reserved_at_100[0x8];
+	u8         phase_eo_pos[0x8];
+	u8         reserved_at_110[0x8];
+	u8         phase_eo_neg[0x8];
+
+	u8         ffe_set_tested[0x10];
+	u8         test_errors_per_lane[0x10];
+};
+
+struct mlx5_ifc_pvlc_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x1c];
+	u8         vl_hw_cap[0x4];
+
+	u8         reserved_at_40[0x1c];
+	u8         vl_admin[0x4];
+
+	u8         reserved_at_60[0x1c];
+	u8         vl_operational[0x4];
+};
+
+struct mlx5_ifc_pude_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_at_18[0x4];
+	u8         oper_status[0x4];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_ptys_reg_bits {
+	u8         reserved_at_0[0x1];
+	u8         an_disable_admin[0x1];
+	u8         an_disable_cap[0x1];
+	u8         reserved_at_3[0x5];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0xd];
+	u8         proto_mask[0x3];
+
+	u8         an_status[0x4];
+	u8         reserved_at_24[0x3c];
+
+	u8         eth_proto_capability[0x20];
+
+	u8         ib_link_width_capability[0x10];
+	u8         ib_proto_capability[0x10];
+
+	u8         reserved_at_a0[0x20];
+
+	u8         eth_proto_admin[0x20];
+
+	u8         ib_link_width_admin[0x10];
+	u8         ib_proto_admin[0x10];
+
+	u8         reserved_at_100[0x20];
+
+	u8         eth_proto_oper[0x20];
+
+	u8         ib_link_width_oper[0x10];
+	u8         ib_proto_oper[0x10];
+
+	u8         reserved_at_160[0x20];
+
+	u8         eth_proto_lp_advertise[0x20];
+
+	u8         reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_mlcr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x20];
+
+	u8         beacon_duration[0x10];
+	u8         reserved_at_40[0x10];
+
+	u8         beacon_remain[0x10];
+};
+
+struct mlx5_ifc_ptas_reg_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         algorithm_options[0x10];
+	u8         reserved_at_30[0x4];
+	u8         repetitions_mode[0x4];
+	u8         num_of_repetitions[0x8];
+
+	u8         grade_version[0x8];
+	u8         height_grade_type[0x4];
+	u8         phase_grade_type[0x4];
+	u8         height_grade_weight[0x8];
+	u8         phase_grade_weight[0x8];
+
+	u8         gisim_measure_bits[0x10];
+	u8         adaptive_tap_measure_bits[0x10];
+
+	u8         ber_bath_high_error_threshold[0x10];
+	u8         ber_bath_mid_error_threshold[0x10];
+
+	u8         ber_bath_low_error_threshold[0x10];
+	u8         one_ratio_high_threshold[0x10];
+
+	u8         one_ratio_high_mid_threshold[0x10];
+	u8         one_ratio_low_mid_threshold[0x10];
+
+	u8         one_ratio_low_threshold[0x10];
+	u8         ndeo_error_threshold[0x10];
+
+	u8         mixer_offset_step_size[0x10];
+	u8         reserved_at_110[0x8];
+	u8         mix90_phase_for_voltage_bath[0x8];
+
+	u8         mixer_offset_start[0x10];
+	u8         mixer_offset_end[0x10];
+
+	u8         reserved_at_140[0x15];
+	u8         ber_test_time[0xb];
+};
+
+struct mlx5_ifc_pspa_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         sub_port[0x8];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_pqdr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x5];
+	u8         prio[0x3];
+	u8         reserved_at_18[0x6];
+	u8         mode[0x2];
+
+	u8         reserved_at_20[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         min_threshold[0x10];
+
+	u8         reserved_at_60[0x10];
+	u8         max_threshold[0x10];
+
+	u8         reserved_at_80[0x10];
+	u8         mark_probability_denominator[0x10];
+
+	u8         reserved_at_a0[0x60];
+};
+
+struct mlx5_ifc_ppsc_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x60];
+
+	u8         reserved_at_80[0x1c];
+	u8         wrps_admin[0x4];
+
+	u8         reserved_at_a0[0x1c];
+	u8         wrps_status[0x4];
+
+	u8         reserved_at_c0[0x8];
+	u8         up_threshold[0x8];
+	u8         reserved_at_d0[0x8];
+	u8         down_threshold[0x8];
+
+	u8         reserved_at_e0[0x20];
+
+	u8         reserved_at_100[0x1c];
+	u8         srps_admin[0x4];
+
+	u8         reserved_at_120[0x1c];
+	u8         srps_status[0x4];
+
+	u8         reserved_at_140[0x40];
+};
+
+struct mlx5_ifc_pplr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x8];
+	u8         lb_cap[0x8];
+	u8         reserved_at_30[0x8];
+	u8         lb_en[0x8];
+};
+
+struct mlx5_ifc_pplm_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x20];
+
+	u8         port_profile_mode[0x8];
+	u8         static_port_profile[0x8];
+	u8         active_port_profile[0x8];
+	u8         reserved_at_58[0x8];
+
+	u8         retransmission_active[0x8];
+	u8         fec_mode_active[0x18];
+
+	u8         reserved_at_80[0x20];
+};
+
+struct mlx5_ifc_ppcnt_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         pnat[0x2];
+	u8         reserved_at_12[0x8];
+	u8         grp[0x6];
+
+	u8         clr[0x1];
+	u8         reserved_at_21[0x1c];
+	u8         prio_tc[0x3];
+
+	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_mpcnt_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         pcie_index[0x8];
+	u8         reserved_at_10[0xa];
+	u8         grp[0x6];
+
+	u8         clr[0x1];
+	u8         reserved_at_21[0x1f];
+
+	union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
+struct mlx5_ifc_ppad_reg_bits {
+	u8         reserved_at_0[0x3];
+	u8         single_mac[0x1];
+	u8         reserved_at_4[0x4];
+	u8         local_port[0x8];
+	u8         mac_47_32[0x10];
+
+	u8         mac_31_0[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_pmtu_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         max_mtu[0x10];
+	u8         reserved_at_30[0x10];
+
+	u8         admin_mtu[0x10];
+	u8         reserved_at_50[0x10];
+
+	u8         oper_mtu[0x10];
+	u8         reserved_at_70[0x10];
+};
+
+struct mlx5_ifc_pmpr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         module[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x18];
+	u8         attenuation_5g[0x8];
+
+	u8         reserved_at_40[0x18];
+	u8         attenuation_7g[0x8];
+
+	u8         reserved_at_60[0x18];
+	u8         attenuation_12g[0x8];
+};
+
+struct mlx5_ifc_pmpe_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         module[0x8];
+	u8         reserved_at_10[0xc];
+	u8         module_status[0x4];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_pmpc_reg_bits {
+	u8         module_state_updated[32][0x8];
+};
+
+struct mlx5_ifc_pmlpn_reg_bits {
+	u8         reserved_at_0[0x4];
+	u8         mlpn_status[0x4];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         e[0x1];
+	u8         reserved_at_21[0x1f];
+};
+
+struct mlx5_ifc_pmlp_reg_bits {
+	u8         rxtx[0x1];
+	u8         reserved_at_1[0x7];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x8];
+	u8         width[0x8];
+
+	u8         lane0_module_mapping[0x20];
+
+	u8         lane1_module_mapping[0x20];
+
+	u8         lane2_module_mapping[0x20];
+
+	u8         lane3_module_mapping[0x20];
+
+	u8         reserved_at_a0[0x160];
+};
+
+struct mlx5_ifc_pmaos_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         module[0x8];
+	u8         reserved_at_10[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_at_18[0x4];
+	u8         oper_status[0x4];
+
+	u8         ase[0x1];
+	u8         ee[0x1];
+	u8         reserved_at_22[0x1c];
+	u8         e[0x2];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_plpc_reg_bits {
+	u8         reserved_at_0[0x4];
+	u8         profile_id[0xc];
+	u8         reserved_at_10[0x4];
+	u8         proto_mask[0x4];
+	u8         reserved_at_18[0x8];
+
+	u8         reserved_at_20[0x10];
+	u8         lane_speed[0x10];
+
+	u8         reserved_at_40[0x17];
+	u8         lpbf[0x1];
+	u8         fec_mode_policy[0x8];
+
+	u8         retransmission_capability[0x8];
+	u8         fec_mode_capability[0x18];
+
+	u8         retransmission_support_admin[0x8];
+	u8         fec_mode_support_admin[0x18];
+
+	u8         retransmission_request_admin[0x8];
+	u8         fec_mode_request_admin[0x18];
+
+	u8         reserved_at_c0[0x80];
+};
+
+struct mlx5_ifc_plib_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x8];
+	u8         ib_port[0x8];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_plbf_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0xd];
+	u8         lbf_mode[0x3];
+
+	u8         reserved_at_20[0x20];
+};
+
+struct mlx5_ifc_pipg_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         dic[0x1];
+	u8         reserved_at_21[0x19];
+	u8         ipg[0x4];
+	u8         reserved_at_3e[0x2];
+};
+
+struct mlx5_ifc_pifr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0xe0];
+
+	u8         port_filter[8][0x20];
+
+	u8         port_filter_update_en[8][0x20];
+};
+
+struct mlx5_ifc_pfcc_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         ppan[0x4];
+	u8         reserved_at_24[0x4];
+	u8         prio_mask_tx[0x8];
+	u8         reserved_at_30[0x8];
+	u8         prio_mask_rx[0x8];
+
+	u8         pptx[0x1];
+	u8         aptx[0x1];
+	u8         reserved_at_42[0x6];
+	u8         pfctx[0x8];
+	u8         reserved_at_50[0x10];
+
+	u8         pprx[0x1];
+	u8         aprx[0x1];
+	u8         reserved_at_62[0x6];
+	u8         pfcrx[0x8];
+	u8         reserved_at_70[0x10];
+
+	u8         reserved_at_80[0x80];
+};
+
+struct mlx5_ifc_pelc_reg_bits {
+	u8         op[0x4];
+	u8         reserved_at_4[0x4];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         op_admin[0x8];
+	u8         op_capability[0x8];
+	u8         op_request[0x8];
+	u8         op_active[0x8];
+
+	u8         admin[0x40];
+
+	u8         capability[0x40];
+
+	u8         request[0x40];
+
+	u8         active[0x40];
+
+	u8         reserved_at_140[0x80];
+};
+
+struct mlx5_ifc_peir_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0xc];
+	u8         error_count[0x4];
+	u8         reserved_at_30[0x10];
+
+	u8         reserved_at_40[0xc];
+	u8         lane[0x4];
+	u8         reserved_at_50[0x8];
+	u8         error_type[0x8];
+};
+
+struct mlx5_ifc_pcap_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x10];
+
+	u8         port_capability_mask[4][0x20];
+};
+
+struct mlx5_ifc_paos_reg_bits {
+	u8         swid[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x4];
+	u8         admin_status[0x4];
+	u8         reserved_at_18[0x4];
+	u8         oper_status[0x4];
+
+	u8         ase[0x1];
+	u8         ee[0x1];
+	u8         reserved_at_22[0x1c];
+	u8         e[0x2];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_pamp_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         opamp_group[0x8];
+	u8         reserved_at_10[0xc];
+	u8         opamp_group_type[0x4];
+
+	u8         start_index[0x10];
+	u8         reserved_at_30[0x4];
+	u8         num_of_indices[0xc];
+
+	u8         index_data[18][0x10];
+};
+
+struct mlx5_ifc_pcmr_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         local_port[0x8];
+	u8         reserved_at_10[0x2e];
+	u8         fcs_cap[0x1];
+	u8         reserved_at_3f[0x1f];
+	u8         fcs_chk[0x1];
+	u8         reserved_at_5f[0x1];
+};
+
+struct mlx5_ifc_lane_2_module_mapping_bits {
+	u8         reserved_at_0[0x6];
+	u8         rx_lane[0x2];
+	u8         reserved_at_8[0x6];
+	u8         tx_lane[0x2];
+	u8         reserved_at_10[0x8];
+	u8         module[0x8];
+};
+
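+/*
+ * Per-buffer configuration: the lossy/epsb flags, the buffer size,
+ * and the xoff/xon flow-control thresholds.
+ */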
+struct mlx5_ifc_bufferx_reg_bits {
+	u8         reserved_at_0[0x6];
+	u8         lossy[0x1];
+	u8         epsb[0x1];
+	u8         reserved_at_8[0xc];
+	u8         size[0xc];
+
+	u8         xoff_threshold[0x10];
+	u8         xon_threshold[0x10];
+};
+
+struct mlx5_ifc_set_node_in_bits {
+	u8         node_description[64][0x8];
+};
+
+struct mlx5_ifc_register_power_settings_bits {
+	u8         reserved_at_0[0x18];
+	u8         power_settings_level[0x8];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_register_host_endianness_bits {
+	u8         he[0x1];
+	u8         reserved_at_1[0x1f];
+
+	u8         reserved_at_20[0x60];
+};
+
+struct mlx5_ifc_umr_pointer_desc_argument_bits {
+	u8         reserved_at_0[0x20];
+
+	u8         mkey[0x20];
+
+	u8         addressh_63_32[0x20];
+
+	u8         addressl_31_0[0x20];
+};
+
+struct mlx5_ifc_ud_adrs_vector_bits {
+	u8         dc_key[0x40];
+
+	u8         ext[0x1];
+	u8         reserved_at_41[0x7];
+	u8         destination_qp_dct[0x18];
+
+	u8         static_rate[0x4];
+	u8         sl_eth_prio[0x4];
+	u8         fl[0x1];
+	u8         mlid[0x7];
+	u8         rlid_udp_sport[0x10];
+
+	u8         reserved_at_80[0x20];
+
+	u8         rmac_47_16[0x20];
+
+	u8         rmac_15_0[0x10];
+	u8         tclass[0x8];
+	u8         hop_limit[0x8];
+
+	u8         reserved_at_e0[0x1];
+	u8         grh[0x1];
+	u8         reserved_at_e2[0x2];
+	u8         src_addr_index[0x8];
+	u8         flow_label[0x14];
+
+	u8         rgid_rip[16][0x8];
+};
+
+struct mlx5_ifc_pages_req_event_bits {
+	u8         reserved_at_0[0x10];
+	u8         function_id[0x10];
+
+	u8         num_pages[0x20];
+
+	u8         reserved_at_40[0xa0];
+};
+
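+/*
+ * Generic event queue entry: event_data is interpreted according to
+ * event_type, and the trailing owner bit lets the driver tell valid
+ * entries from stale ones as the queue wraps.
+ */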
+struct mlx5_ifc_eqe_bits {
+	u8         reserved_at_0[0x8];
+	u8         event_type[0x8];
+	u8         reserved_at_10[0x8];
+	u8         event_sub_type[0x8];
+
+	u8         reserved_at_20[0xe0];
+
+	union mlx5_ifc_event_auto_bits event_data;
+
+	u8         reserved_at_1e0[0x10];
+	u8         signature[0x8];
+	u8         reserved_at_1f8[0x7];
+	u8         owner[0x1];
+};
+
+enum {
+	MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT  = 0x7,
+};
+
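+/*
+ * Hardware command queue entry: small commands fit in the 16 inline
+ * input/output bytes, larger ones spill into chained mailboxes via
+ * the mailbox pointers; token and signature guard against reuse and
+ * corruption, and the ownership bit hands the entry over to hardware.
+ */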
+struct mlx5_ifc_cmd_queue_entry_bits {
+	u8         type[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         input_length[0x20];
+
+	u8         input_mailbox_pointer_63_32[0x20];
+
+	u8         input_mailbox_pointer_31_9[0x17];
+	u8         reserved_at_77[0x9];
+
+	u8         command_input_inline_data[16][0x8];
+
+	u8         command_output_inline_data[16][0x8];
+
+	u8         output_mailbox_pointer_63_32[0x20];
+
+	u8         output_mailbox_pointer_31_9[0x17];
+	u8         reserved_at_1b7[0x9];
+
+	u8         output_length[0x20];
+
+	u8         token[0x8];
+	u8         signature[0x8];
+	u8         reserved_at_1f0[0x8];
+	u8         status[0x7];
+	u8         ownership[0x1];
+};
+
+struct mlx5_ifc_cmd_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         command_output[0x20];
+};
+
+struct mlx5_ifc_cmd_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         command[0][0x20];
+};
+
+struct mlx5_ifc_cmd_if_box_bits {
+	u8         mailbox_data[512][0x8];
+
+	u8         reserved_at_1000[0x180];
+
+	u8         next_pointer_63_32[0x20];
+
+	u8         next_pointer_31_10[0x16];
+	u8         reserved_at_11b6[0xa];
+
+	u8         block_number[0x20];
+
+	u8         reserved_at_11e0[0x8];
+	u8         token[0x8];
+	u8         ctrl_signature[0x8];
+	u8         signature[0x8];
+};
+
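+/* A single MTT entry: page-aligned physical address tag plus R/W enables. */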
+struct mlx5_ifc_mtt_bits {
+	u8         ptag_63_32[0x20];
+
+	u8         ptag_31_8[0x18];
+	u8         reserved_at_38[0x6];
+	u8         wr_en[0x1];
+	u8         rd_en[0x1];
+};
+
+struct mlx5_ifc_query_wol_rol_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x10];
+	u8         rol_mode[0x8];
+	u8         wol_mode[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_query_wol_rol_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_wol_rol_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         rol_mode_valid[0x1];
+	u8         wol_mode_valid[0x1];
+	u8         reserved_at_42[0xe];
+	u8         rol_mode[0x8];
+	u8         wol_mode[0x8];
+
+	u8         reserved_at_60[0x20];
+};
+
+enum {
+	MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER  = 0x0,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED     = 0x1,
+	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC  = 0x2,
+};
+
+enum {
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR              = 0x1,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC                   = 0x7,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR                 = 0x8,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR                   = 0x9,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR            = 0xa,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR                 = 0xb,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN  = 0xc,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR                    = 0xd,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV                       = 0xe,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR                    = 0xf,
+	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR                = 0x10,
+};
+
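+/*
+ * Initialization segment, mapped from the start of the device BAR:
+ * firmware and command-interface revisions, the command queue
+ * physical address and sizing, the initializing flag polled at load
+ * time, and the health buffer sampled by the health-check timer.
+ */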
+struct mlx5_ifc_initial_seg_bits {
+	u8         fw_rev_minor[0x10];
+	u8         fw_rev_major[0x10];
+
+	u8         cmd_interface_rev[0x10];
+	u8         fw_rev_subminor[0x10];
+
+	u8         reserved_at_40[0x40];
+
+	u8         cmdq_phy_addr_63_32[0x20];
+
+	u8         cmdq_phy_addr_31_12[0x14];
+	u8         reserved_at_b4[0x2];
+	u8         nic_interface[0x2];
+	u8         log_cmdq_size[0x4];
+	u8         log_cmdq_stride[0x4];
+
+	u8         command_doorbell_vector[0x20];
+
+	u8         reserved_at_e0[0xf00];
+
+	u8         initializing[0x1];
+	u8         reserved_at_fe1[0x4];
+	u8         nic_interface_supported[0x3];
+	u8         reserved_at_fe8[0x18];
+
+	struct mlx5_ifc_health_buffer_bits health_buffer;
+
+	u8         no_dram_nic_offset[0x20];
+
+	u8         reserved_at_1220[0x6e40];
+
+	u8         reserved_at_8060[0x1f];
+	u8         clear_int[0x1];
+
+	u8         health_syndrome[0x8];
+	u8         health_counter[0x18];
+
+	u8         reserved_at_80a0[0x17fc0];
+};
+
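+/*
+ * Documentation union grouping the layouts of the port control and
+ * status registers accessed through ACCESS_REG; its size bounds the
+ * register_data payload any single register needs.
+ */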
+union mlx5_ifc_ports_control_registers_document_bits {
+	struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
+	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
+	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
+	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
+	struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
+	struct mlx5_ifc_pamp_reg_bits pamp_reg;
+	struct mlx5_ifc_paos_reg_bits paos_reg;
+	struct mlx5_ifc_pcap_reg_bits pcap_reg;
+	struct mlx5_ifc_peir_reg_bits peir_reg;
+	struct mlx5_ifc_pelc_reg_bits pelc_reg;
+	struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
+	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
+	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+	struct mlx5_ifc_pifr_reg_bits pifr_reg;
+	struct mlx5_ifc_pipg_reg_bits pipg_reg;
+	struct mlx5_ifc_plbf_reg_bits plbf_reg;
+	struct mlx5_ifc_plib_reg_bits plib_reg;
+	struct mlx5_ifc_plpc_reg_bits plpc_reg;
+	struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
+	struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
+	struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
+	struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
+	struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
+	struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
+	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
+	struct mlx5_ifc_ppad_reg_bits ppad_reg;
+	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
+	struct mlx5_ifc_pplm_reg_bits pplm_reg;
+	struct mlx5_ifc_pplr_reg_bits pplr_reg;
+	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+	struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
+	struct mlx5_ifc_pspa_reg_bits pspa_reg;
+	struct mlx5_ifc_ptas_reg_bits ptas_reg;
+	struct mlx5_ifc_ptys_reg_bits ptys_reg;
+	struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
+	struct mlx5_ifc_pude_reg_bits pude_reg;
+	struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
+	struct mlx5_ifc_slrg_reg_bits slrg_reg;
+	struct mlx5_ifc_sltp_reg_bits sltp_reg;
+	u8         reserved_at_0[0x60e0];
+};
+
+union mlx5_ifc_debug_enhancements_document_bits {
+	struct mlx5_ifc_health_buffer_bits health_buffer;
+	u8         reserved_at_0[0x200];
+};
+
+union mlx5_ifc_uplink_pci_interface_document_bits {
+	struct mlx5_ifc_initial_seg_bits initial_seg;
+	u8         reserved_at_0[0x20060];
+};
+
+struct mlx5_ifc_set_flow_table_root_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_set_flow_table_root_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x140];
+};
+
+enum {
+	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID     = (1UL << 0),
+	MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
+};
+
+struct mlx5_ifc_modify_flow_table_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_flow_table_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         other_vport[0x1];
+	u8         reserved_at_41[0xf];
+	u8         vport_number[0x10];
+
+	u8         reserved_at_60[0x10];
+	u8         modify_field_select[0x10];
+
+	u8         table_type[0x8];
+	u8         reserved_at_88[0x18];
+
+	u8         reserved_at_a0[0x8];
+	u8         table_id[0x18];
+
+	u8         reserved_at_c0[0x4];
+	u8         table_miss_mode[0x4];
+	u8         reserved_at_c8[0x18];
+
+	u8         reserved_at_e0[0x8];
+	u8         table_miss_id[0x18];
+
+	u8         reserved_at_100[0x8];
+	u8         lag_master_next_table_id[0x18];
+
+	u8         reserved_at_120[0x80];
+};
+
+struct mlx5_ifc_ets_tcn_config_reg_bits {
+	u8         g[0x1];
+	u8         b[0x1];
+	u8         r[0x1];
+	u8         reserved_at_3[0x9];
+	u8         group[0x4];
+	u8         reserved_at_10[0x9];
+	u8         bw_allocation[0x7];
+
+	u8         reserved_at_20[0xc];
+	u8         max_bw_units[0x4];
+	u8         reserved_at_30[0x8];
+	u8         max_bw_value[0x8];
+};
+
+struct mlx5_ifc_ets_global_config_reg_bits {
+	u8         reserved_at_0[0x2];
+	u8         r[0x1];
+	u8         reserved_at_3[0x1d];
+
+	u8         reserved_at_20[0xc];
+	u8         max_bw_units[0x4];
+	u8         reserved_at_30[0x8];
+	u8         max_bw_value[0x8];
+};
+
+struct mlx5_ifc_qetc_reg_bits {
+	u8                                         reserved_at_0[0x8];
+	u8                                         port_number[0x8];
+	u8                                         reserved_at_10[0x30];
+
+	struct mlx5_ifc_ets_tcn_config_reg_bits    tc_configuration[0x8];
+	struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
+};
+
+struct mlx5_ifc_qtct_reg_bits {
+	u8         reserved_at_0[0x8];
+	u8         port_number[0x8];
+	u8         reserved_at_10[0xd];
+	u8         prio[0x3];
+
+	u8         reserved_at_20[0x1d];
+	u8         tclass[0x3];
+};
+
+struct mlx5_ifc_mcia_reg_bits {
+	u8         l[0x1];
+	u8         reserved_at_1[0x7];
+	u8         module[0x8];
+	u8         reserved_at_10[0x8];
+	u8         status[0x8];
+
+	u8         i2c_device_address[0x8];
+	u8         page_number[0x8];
+	u8         device_address[0x10];
+
+	u8         reserved_at_40[0x10];
+	u8         size[0x10];
+
+	u8         reserved_at_60[0x20];
+
+	u8         dword_0[0x20];
+	u8         dword_1[0x20];
+	u8         dword_2[0x20];
+	u8         dword_3[0x20];
+	u8         dword_4[0x20];
+	u8         dword_5[0x20];
+	u8         dword_6[0x20];
+	u8         dword_7[0x20];
+	u8         dword_8[0x20];
+	u8         dword_9[0x20];
+	u8         dword_10[0x20];
+	u8         dword_11[0x20];
+};
+
+struct mlx5_ifc_dcbx_param_bits {
+	u8         dcbx_cee_cap[0x1];
+	u8         dcbx_ieee_cap[0x1];
+	u8         dcbx_standby_cap[0x1];
+	u8         reserved_at_3[0x5];
+	u8         port_number[0x8];
+	u8         reserved_at_10[0xa];
+	u8         max_application_table_size[0x6];
+
+	u8         reserved_at_20[0x15];
+	u8         version_oper[0x3];
+	u8         reserved_at_38[0x5];
+	u8         version_admin[0x3];
+
+	u8         willing_admin[0x1];
+	u8         reserved_at_41[0x3];
+	u8         pfc_cap_oper[0x4];
+	u8         reserved_at_48[0x4];
+	u8         pfc_cap_admin[0x4];
+	u8         reserved_at_50[0x4];
+	u8         num_of_tc_oper[0x4];
+	u8         reserved_at_58[0x4];
+	u8         num_of_tc_admin[0x4];
+
+	u8         remote_willing[0x1];
+	u8         reserved_at_61[0x3];
+	u8         remote_pfc_cap[0x4];
+	u8         reserved_at_68[0x14];
+	u8         remote_num_of_tc[0x4];
+
+	u8         reserved_at_80[0x18];
+	u8         error[0x8];
+
+	u8         reserved_at_a0[0x160];
+};
+
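+/*
+ * LAG context shared by the create/modify/query LAG commands below:
+ * the LAG state plus the tx port-affinity remap for the two physical
+ * ports.
+ */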
+struct mlx5_ifc_lagc_bits {
+	u8         reserved_at_0[0x1d];
+	u8         lag_state[0x3];
+
+	u8         reserved_at_20[0x14];
+	u8         tx_remap_affinity_2[0x4];
+	u8         reserved_at_38[0x4];
+	u8         tx_remap_affinity_1[0x4];
+};
+
+struct mlx5_ifc_create_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_modify_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_modify_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x20];
+	u8         field_select[0x20];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+
+	struct mlx5_ifc_lagc_bits ctx;
+};
+
+struct mlx5_ifc_query_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_create_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_out_bits {
+	u8         status[0x8];
+	u8         reserved_at_8[0x18];
+
+	u8         syndrome[0x20];
+
+	u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_destroy_vport_lag_in_bits {
+	u8         opcode[0x10];
+	u8         reserved_at_10[0x10];
+
+	u8         reserved_at_20[0x10];
+	u8         op_mod[0x10];
+
+	u8         reserved_at_40[0x40];
+};
+
+#endif /* MLX5_IFC_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/mr.c b/drivers/net/ethernet/mellanox/mlx5/lib/mr.c
new file mode 100644
index 0000000..f5eb120
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/mr.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
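+/*
+ * The mkey table tracks every memory key created through this
+ * interface.  Lookups and updates can happen in contexts that cannot
+ * sleep, hence the GFP_ATOMIC radix tree and the irq-safe locking
+ * used below.
+ */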
+void mlx5_init_mkey_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
+
+	memset(table, 0, sizeof(*table));
+	rwlock_init(&table->lock);
+	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev)
+{
+}
+
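+/*
+ * Create a memory key.  The low byte of the key is a driver-generated
+ * variant taken from a per-device counter; the remaining bits come
+ * from the mkey index that firmware returns, and the two are combined
+ * into mkey->key.  When a callback is supplied the command completes
+ * asynchronously and this function returns before the mkey has been
+ * added to the table.
+ */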
+int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
+			     struct mlx5_core_mkey *mkey,
+			     u32 *in, int inlen,
+			     u32 *out, int outlen,
+			     mlx5_cmd_cbk_t callback, void *context)
+{
+	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
+	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {0};
+	u32 mkey_index;
+	void *mkc;
+	int err;
+	u8 key;
+
+	spin_lock_irq(&dev->priv.mkey_lock);
+	key = dev->priv.mkey_key++;
+	spin_unlock_irq(&dev->priv.mkey_lock);
+	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
+	MLX5_SET(mkc, mkc, mkey_7_0, key);
+
+	if (callback)
+		return mlx5_cmd_exec_cb(dev, in, inlen, out, outlen,
+					callback, context);
+
+	err = mlx5_cmd_exec(dev, in, inlen, lout, sizeof(lout));
+	if (err)
+		return err;
+
+	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
+	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
+	mkey->size = MLX5_GET64(mkc, mkc, len);
+	mkey->key = mlx5_idx_to_mkey(mkey_index) | key;
+	mkey->pd = MLX5_GET(mkc, mkc, pd);
+
+	mlx5_core_dbg(dev, "out 0x%x, key 0x%x, mkey 0x%x\n",
+		      mkey_index, key, mkey->key);
+
+	/* connect to mkey tree */
+	write_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mkey->key), mkey);
+	write_unlock_irq(&table->lock);
+	if (err) {
+		mlx5_core_warn(dev, "failed radix tree insert of mkey 0x%x, %d\n",
+			       mlx5_base_mkey(mkey->key), err);
+		mlx5_core_destroy_mkey(dev, mkey);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_mkey_cb);
+
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
+			  struct mlx5_core_mkey *mkey,
+			  u32 *in, int inlen)
+{
+	return mlx5_core_create_mkey_cb(dev, mkey, in, inlen,
+					NULL, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(mlx5_core_create_mkey);
+
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
+			   struct mlx5_core_mkey *mkey)
+{
+	struct mlx5_mkey_table *table = &dev->priv.mkey_table;
+	u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)]   = {0};
+	struct mlx5_core_mkey *deleted_mkey;
+	unsigned long flags;
+
+	write_lock_irqsave(&table->lock, flags);
+	deleted_mkey = radix_tree_delete(&table->tree, mlx5_base_mkey(mkey->key));
+	write_unlock_irqrestore(&table->lock, flags);
+	if (!deleted_mkey) {
+		mlx5_core_warn(dev, "failed radix tree delete of mkey 0x%x\n",
+			       mlx5_base_mkey(mkey->key));
+		return -ENOENT;
+	}
+
+	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
+	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_mkey);
+
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
+			 u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {0};
+
+	memset(out, 0, outlen);
+	MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
+	MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_query_mkey);
+
+int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
+			     u32 *mkey)
+{
+	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)]   = {0};
+	int err;
+
+	MLX5_SET(query_special_contexts_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*mkey = MLX5_GET(query_special_contexts_out, out,
+				 dump_fill_mkey);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_dump_fill_mkey);
+
+static inline u32 mlx5_get_psv(u32 *out, int psv_index)
+{
+	switch (psv_index) {
+	case 1: return MLX5_GET(create_psv_out, out, psv1_index);
+	case 2: return MLX5_GET(create_psv_out, out, psv2_index);
+	case 3: return MLX5_GET(create_psv_out, out, psv3_index);
+	default: return MLX5_GET(create_psv_out, out, psv0_index);
+	}
+}
+
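+/*
+ * Create npsvs protection status vectors (used by the signature
+ * offload machinery) under the given PD; firmware returns one index
+ * per PSV, collected into sig_index[].
+ */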
+int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
+			 int npsvs, u32 *sig_index)
+{
+	u32 out[MLX5_ST_SZ_DW(create_psv_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(create_psv_in)]   = {0};
+	int i, err;
+
+	if (npsvs > MLX5_MAX_PSVS)
+		return -EINVAL;
+
+	MLX5_SET(create_psv_in, in, opcode, MLX5_CMD_OP_CREATE_PSV);
+	MLX5_SET(create_psv_in, in, pd, pdn);
+	MLX5_SET(create_psv_in, in, num_psv, npsvs);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	for (i = 0; i < npsvs; i++)
+		sig_index[i] = mlx5_get_psv(out, i);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_psv);
+
+int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
+{
+	u32 out[MLX5_ST_SZ_DW(destroy_psv_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_psv_in)]   = {0};
+
+	MLX5_SET(destroy_psv_in, in, opcode, MLX5_CMD_OP_DESTROY_PSV);
+	MLX5_SET(destroy_psv_in, in, psvn, psv_num);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_psv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/pd.c b/drivers/net/ethernet/mellanox/mlx5/lib/pd.c
new file mode 100644
index 0000000..1a55fc3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/pd.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
+int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)]   = {0};
+	int err;
+
+	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*pdn = MLX5_GET(alloc_pd_out, out, pd);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_alloc_pd);
+
+int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn)
+{
+	u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)]   = {0};
+
+	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+	MLX5_SET(dealloc_pd_in, in, pd, pdn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_dealloc_pd);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/port.c b/drivers/net/ethernet/mellanox/mlx5/lib/port.c
new file mode 100644
index 0000000..d6bb70d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/port.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/port.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
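+/*
+ * Execute an ACCESS_REG command against a ports/management register.
+ * op_mod selects the direction (0 = write, 1 = read, hence !write);
+ * the register payload travels in register_data in both directions.
+ */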
+int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
+			 int size_in, void *data_out, int size_out,
+			 u16 reg_id, int arg, int write)
+{
+	int outlen = MLX5_ST_SZ_BYTES(access_register_out) + size_out;
+	int inlen = MLX5_ST_SZ_BYTES(access_register_in) + size_in;
+	int err = -ENOMEM;
+	u32 *out = NULL;
+	u32 *in = NULL;
+	void *data;
+
+	in = mlx5_vzalloc(inlen);
+	out = mlx5_vzalloc(outlen);
+	if (!in || !out)
+		goto out;
+
+	data = MLX5_ADDR_OF(access_register_in, in, register_data);
+	memcpy(data, data_in, size_in);
+
+	MLX5_SET(access_register_in, in, opcode, MLX5_CMD_OP_ACCESS_REG);
+	MLX5_SET(access_register_in, in, op_mod, !write);
+	MLX5_SET(access_register_in, in, argument, arg);
+	MLX5_SET(access_register_in, in, register_id, reg_id);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
+	if (err)
+		goto out;
+
+	data = MLX5_ADDR_OF(access_register_out, out, register_data);
+	memcpy(data_out, data, size_out);
+
+out:
+	kvfree(out);
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
+
+struct mlx5_reg_pcap {
+	u8			rsvd0;
+	u8			port_num;
+	u8			rsvd1[2];
+	__be32			caps_127_96;
+	__be32			caps_95_64;
+	__be32			caps_63_32;
+	__be32			caps_31_0;
+};
+
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
+{
+	struct mlx5_reg_pcap in;
+	struct mlx5_reg_pcap out;
+
+	memset(&in, 0, sizeof(in));
+	in.caps_127_96 = cpu_to_be32(caps);
+	in.port_num = port_num;
+
+	return mlx5_core_access_reg(dev, &in, sizeof(in), &out,
+				    sizeof(out), MLX5_REG_PCAP, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
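+/*
+ * PTYS (port type and speed) carries the capability, admin and
+ * operational protocol fields for both Ethernet and IB, selected by
+ * proto_mask; the helpers below are thin accessors over this query.
+ */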
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+			 int ptys_size, int proto_mask, u8 local_port)
+{
+	u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
+
+	MLX5_SET(ptys_reg, in, local_port, local_port);
+	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+	return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+				    ptys_size, MLX5_REG_PTYS, 0, 0);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+	u32 in[MLX5_ST_SZ_DW(mlcr_reg)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+
+	MLX5_SET(mlcr_reg, in, local_port, 1);
+	MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+			      u32 *proto_cap, int proto_mask)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+	if (err)
+		return err;
+
+	if (proto_mask == MLX5_PTYS_EN)
+		*proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+	else
+		*proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+				u32 *proto_admin, int proto_mask)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1);
+	if (err)
+		return err;
+
+	if (proto_mask == MLX5_PTYS_EN)
+		*proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+	else
+		*proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+				    u8 *link_width_oper, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port);
+	if (err)
+		return err;
+
+	*link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper);
+
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+				   u32 *proto_oper, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN,
+				   local_port);
+	if (err)
+		return err;
+
+	*proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_query_port_eth_proto_oper);
+
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+				  u8 *proto_oper, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	int err;
+
+	err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
+				   local_port);
+	if (err)
+		return err;
+
+	*proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper);
+
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+		       u32 proto_admin, int proto_mask)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+	u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+	u8 an_disable_admin;
+	u8 an_disable_cap;
+	u8 an_status;
+
+	mlx5_query_port_autoneg(dev, proto_mask, &an_status,
+				&an_disable_cap, &an_disable_admin);
+	if (!an_disable_cap && an_disable)
+		return -EPERM;
+
+	memset(in, 0, sizeof(in));
+
+	MLX5_SET(ptys_reg, in, local_port, 1);
+	MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
+	MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+	if (proto_mask == MLX5_PTYS_EN)
+		MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+	else
+		MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PTYS, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
+
+/*
+ * Toggle the port administrative link state; intended to be called
+ * right after a port register write so the new setting takes effect.
+ */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+	enum mlx5_port_status ps;
+
+	mlx5_query_port_admin_status(dev, &ps);
+	mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
+	if (ps == MLX5_PORT_UP)
+		mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
+
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status)
+{
+	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+	MLX5_SET(paos_reg, in, local_port, 1);
+	MLX5_SET(paos_reg, in, admin_status, status);
+	MLX5_SET(paos_reg, in, ase, 1);
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
+
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status)
+{
+	u32 in[MLX5_ST_SZ_DW(paos_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(paos_reg)];
+	int err;
+
+	MLX5_SET(paos_reg, in, local_port, 1);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PAOS, 0, 0);
+	if (err)
+		return err;
+	*status = MLX5_GET(paos_reg, out, admin_status);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+				u16 *max_mtu, u16 *oper_mtu, u8 port)
+{
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+	MLX5_SET(pmtu_reg, in, local_port, port);
+	mlx5_core_access_reg(dev, in, sizeof(in), out,
+			     sizeof(out), MLX5_REG_PMTU, 0, 0);
+
+	if (max_mtu)
+		*max_mtu  = MLX5_GET(pmtu_reg, out, max_mtu);
+	if (oper_mtu)
+		*oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+	if (admin_mtu)
+		*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+{
+	u32 in[MLX5_ST_SZ_DW(pmtu_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+	MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+	MLX5_SET(pmtu_reg, in, local_port, port);
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+			     u8 port)
+{
+	mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+			      u8 port)
+{
+	mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+	u32 in[MLX5_ST_SZ_DW(pmlp_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+	int module_mapping;
+	int err;
+
+	MLX5_SET(pmlp_reg, in, local_port, 1);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+				   MLX5_REG_PMLP, 0, 0);
+	if (err)
+		return err;
+
+	module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+	*module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+	return 0;
+}
+
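+/*
+ * Read the cable module EEPROM through the MCIA register.  The low
+ * I2C address covers the first 256 bytes and the high address the
+ * rest, so a read that would cross that boundary is clamped to end at
+ * offset 256; the return value is the number of bytes actually read.
+ */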
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data)
+{
+	u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+	u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+	int module_num;
+	u16 i2c_addr;
+	int status;
+	int err;
+	void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+	err = mlx5_query_module_num(dev, &module_num);
+	if (err)
+		return err;
+
+	memset(in, 0, sizeof(in));
+	size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+	if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+	    offset + size > MLX5_EEPROM_PAGE_LENGTH)
+		/* Cross-page read: clamp it to end at offset 256 of the low page */
+		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+	i2c_addr = MLX5_I2C_ADDR_LOW;
+	if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+		i2c_addr = MLX5_I2C_ADDR_HIGH;
+		offset -= MLX5_EEPROM_PAGE_LENGTH;
+	}
+
+	MLX5_SET(mcia_reg, in, l, 0);
+	MLX5_SET(mcia_reg, in, module, module_num);
+	MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+	MLX5_SET(mcia_reg, in, page_number, 0);
+	MLX5_SET(mcia_reg, in, device_address, offset);
+	MLX5_SET(mcia_reg, in, size, size);
+
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_MCIA, 0, 0);
+	if (err)
+		return err;
+
+	status = MLX5_GET(mcia_reg, out, status);
+	if (status) {
+		mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+			      status);
+		return -EIO;
+	}
+
+	memcpy(data, ptr, size);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+
+static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+				int pvlc_size,  u8 local_port)
+{
+	u32 in[MLX5_ST_SZ_DW(pvlc_reg)] = {0};
+
+	MLX5_SET(pvlc_reg, in, local_port, local_port);
+	return mlx5_core_access_reg(dev, in, sizeof(in), pvlc,
+				    pvlc_size, MLX5_REG_PVLC, 0, 0);
+}
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+			      u8 *vl_hw_cap, u8 local_port)
+{
+	u32 out[MLX5_ST_SZ_DW(pvlc_reg)];
+	int err;
+
+	err = mlx5_query_port_pvlc(dev, out, sizeof(out), local_port);
+	if (err)
+		return err;
+
+	*vl_hw_cap = MLX5_GET(pvlc_reg, out, vl_hw_cap);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap);
+
+int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
+			     u8 port_num, void *out, size_t sz)
+{
+	u32 *in;
+	int err;
+
+	in = mlx5_vzalloc(sz);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+	MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+	err = mlx5_core_access_reg(dev, in, sz, out,
+				   sz, MLX5_REG_PPCNT, 0, 0);
+
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	MLX5_SET(pfcc_reg, in, pptx, tx_pause);
+	MLX5_SET(pfcc_reg, in, pprx, rx_pause);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 0);
+	if (err)
+		return err;
+
+	if (rx_pause)
+		*rx_pause = MLX5_GET(pfcc_reg, out, pprx);
+
+	if (tx_pause)
+		*tx_pause = MLX5_GET(pfcc_reg, out, pptx);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx);
+	MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx);
+	MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx);
+	MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx);
+
+	return mlx5_core_access_reg(dev, in, sizeof(in), out,
+				    sizeof(out), MLX5_REG_PFCC, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+{
+	u32 in[MLX5_ST_SZ_DW(pfcc_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(pfcc_reg)];
+	int err;
+
+	MLX5_SET(pfcc_reg, in, local_port, 1);
+	err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_PFCC, 0, 0);
+	if (err)
+		return err;
+
+	if (pfc_en_tx)
+		*pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx);
+
+	if (pfc_en_rx)
+		*pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+			     u8 *an_status,
+			     u8 *an_disable_cap, u8 *an_disable_admin)
+{
+	u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+	*an_status = 0;
+	*an_disable_cap = 0;
+	*an_disable_admin = 0;
+
+	if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
+		return;
+
+	*an_status = MLX5_GET(ptys_reg, out, an_status);
+	*an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+	*an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
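+/* Return the highest traffic-class index (not the count): with the
+ * default of 8 TCs this is 7.
+ */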
+int mlx5_max_tc(struct mlx5_core_dev *mdev)
+{
+	u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
+
+	return num_tc - 1;
+}
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(dcbx_param)] = {0};
+
+	MLX5_SET(dcbx_param, in, port_number, 1);
+
+	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+				    sizeof(in), MLX5_REG_DCBX_PARAM, 0, 0);
+}
+
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in)
+{
+	u32 out[MLX5_ST_SZ_DW(dcbx_param)];
+
+	MLX5_SET(dcbx_param, in, port_number, 1);
+
+	return mlx5_core_access_reg(mdev, in, sizeof(out), out,
+				    sizeof(out), MLX5_REG_DCBX_PARAM, 0, 1);
+}
+
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
+{
+	u32 in[MLX5_ST_SZ_DW(qtct_reg)] = {0};
+	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+	int err;
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (prio_tc[i] > mlx5_max_tc(mdev))
+			return -EINVAL;
+
+		MLX5_SET(qtct_reg, in, prio, i);
+		MLX5_SET(qtct_reg, in, tclass, prio_tc[i]);
+
+		err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+					   sizeof(out), MLX5_REG_QTCT, 0, 1);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
+
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+			    u8 prio, u8 *tc)
+{
+	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+	int err;
+
+	memset(in, 0, sizeof(in));
+	memset(out, 0, sizeof(out));
+
+	MLX5_SET(qtct_reg, in, port_number, 1);
+	MLX5_SET(qtct_reg, in, prio, prio);
+
+	err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+				   sizeof(out), MLX5_REG_QTCT, 0, 0);
+	if (!err)
+		*tc = MLX5_GET(qtct_reg, out, tclass);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
+
+static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
+				   int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+
+	if (!MLX5_CAP_GEN(mdev, ets))
+		return -ENOTSUPP;
+
+	return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
+				    MLX5_REG_QETCR, 0, 1);
+}
+
+static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
+				     int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
+
+	if (!MLX5_CAP_GEN(mdev, ets))
+		return -ENOTSUPP;
+
+	memset(in, 0, sizeof(in));
+	return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
+				    MLX5_REG_QETCR, 0, 0);
+}
+
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
+{
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
+	int i;
+
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1);
+		MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]);
+	}
+
+	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+{
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
+	int i;
+
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1);
+		MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]);
+	}
+
+	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
+
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+				u8 tc, u8 *bw_pct)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+	void *ets_tcn_conf;
+	int err;
+
+	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+	if (err)
+		return err;
+
+	ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out,
+				    tc_configuration[tc]);
+
+	*bw_pct = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+			   bw_allocation);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
+
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+				    u8 *max_bw_value,
+				    u8 *max_bw_units)
+{
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0};
+	void *ets_tcn_conf;
+	int i;
+
+	MLX5_SET(qetc_reg, in, port_number, 1);
+
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]);
+
+		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1);
+		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units,
+			 max_bw_units[i]);
+		MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value,
+			 max_bw_value[i]);
+	}
+
+	return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
+
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+				   u8 *max_bw_value,
+				   u8 *max_bw_units)
+{
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
+	void *ets_tcn_conf;
+	int err;
+	int i;
+
+	err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out));
+	if (err)
+		return err;
+
+	for (i = 0; i <= mlx5_max_tc(mdev); i++) {
+		ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]);
+
+		max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+					   max_bw_value);
+		max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf,
+					   max_bw_units);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
+
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+{
+	u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)] = {0};
+
+	MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL);
+	MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1);
+	MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
+
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+{
+	u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)] = {0};
+	int err;
+
+	MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL);
+	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+				  int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+	return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+				    outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+	return mlx5_core_access_reg(mdev, in, inlen, out,
+				    sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+	u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {0};
+
+	MLX5_SET(pcmr_reg, in, local_port, 1);
+	MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+	return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled)
+{
+	u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+	/* Default values for FW that does not support MLX5_REG_PCMR */
+	*supported = false;
+	*enabled = true;
+
+	if (!MLX5_CAP_GEN(mdev, ports_check))
+		return;
+
+	if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+		return;
+
+	*supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+	*enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
+
+static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = {
+	"Cable plugged",   /* MLX5_MODULE_STATUS_PLUGGED    = 0x1 */
+	"Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED  = 0x2 */
+	"Cable error",     /* MLX5_MODULE_STATUS_ERROR      = 0x3 */
+};
+
+static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = {
+	"Power budget exceeded",
+	"Long Range for non MLNX cable",
+	"Bus stuck(I2C or data shorted)",
+	"No EEPROM/retry timeout",
+	"Enforce part number list",
+	"Unknown identifier",
+	"High Temperature",
+	"Bad or shorted cable/module",
+	"Unknown status",
+};
+
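+/* Module status codes start at 0x1 (see mlx5_pme_status above), hence
+ * the "module_status - 1" indexing below.
+ */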
+void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+	enum port_module_event_status_type module_status;
+	enum port_module_event_error_type error_type;
+	struct mlx5_eqe_port_module *module_event_eqe;
+	struct mlx5_priv *priv = &dev->priv;
+	u8 module_num;
+
+	module_event_eqe = &eqe->data.port_module;
+	module_num = module_event_eqe->module;
+	module_status = module_event_eqe->module_status &
+			PORT_MODULE_EVENT_MODULE_STATUS_MASK;
+	error_type = module_event_eqe->error_type &
+		     PORT_MODULE_EVENT_ERROR_TYPE_MASK;
+
+	if (module_status < MLX5_MODULE_STATUS_ERROR) {
+		priv->pme_stats.status_counters[module_status - 1]++;
+	} else if (module_status == MLX5_MODULE_STATUS_ERROR) {
+		if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN)
+			/* Unknown error type */
+			error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN;
+		priv->pme_stats.error_counters[error_type]++;
+	}
+
+	if (!printk_ratelimit())
+		return;
+
+	if (module_status < MLX5_MODULE_STATUS_ERROR)
+		mlx5_core_info(dev,
+			       "Port module event: module %u, %s\n",
+			       module_num, mlx5_pme_status[module_status - 1]);
+
+	else if (module_status == MLX5_MODULE_STATUS_ERROR)
+		mlx5_core_info(dev,
+			       "Port module event[error]: module %u, %s, %s\n",
+			       module_num, mlx5_pme_status[module_status - 1],
+			       mlx5_pme_error[error_type]);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/port.h b/drivers/net/ethernet/mellanox/mlx5/lib/port.h
new file mode 100644
index 0000000..f9fff17
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/port.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_PORT_H__
+#define __MLX5_PORT_H__
+
+#include "driver.h"
+
+enum mlx5_beacon_duration {
+	MLX5_BEACON_DURATION_OFF = 0x0,
+	MLX5_BEACON_DURATION_INF = 0xffff,
+};
+
+enum mlx5_module_id {
+	MLX5_MODULE_ID_SFP              = 0x3,
+	MLX5_MODULE_ID_QSFP             = 0xC,
+	MLX5_MODULE_ID_QSFP_PLUS        = 0xD,
+	MLX5_MODULE_ID_QSFP28           = 0x11,
+};
+
+enum mlx5_an_status {
+	MLX5_AN_UNAVAILABLE = 0,
+	MLX5_AN_COMPLETE    = 1,
+	MLX5_AN_FAILED      = 2,
+	MLX5_AN_LINK_UP     = 3,
+	MLX5_AN_LINK_DOWN   = 4,
+};
+
+#define MLX5_EEPROM_MAX_BYTES			32
+#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK	0x000000ff
+#define MLX5_I2C_ADDR_LOW		0x50
+#define MLX5_I2C_ADDR_HIGH		0x51
+#define MLX5_EEPROM_PAGE_LENGTH		256
+
+enum mlx5e_link_mode {
+	MLX5E_1000BASE_CX_SGMII	 = 0,
+	MLX5E_1000BASE_KX	 = 1,
+	MLX5E_10GBASE_CX4	 = 2,
+	MLX5E_10GBASE_KX4	 = 3,
+	MLX5E_10GBASE_KR	 = 4,
+	MLX5E_20GBASE_KR2	 = 5,
+	MLX5E_40GBASE_CR4	 = 6,
+	MLX5E_40GBASE_KR4	 = 7,
+	MLX5E_56GBASE_R4	 = 8,
+	MLX5E_10GBASE_CR	 = 12,
+	MLX5E_10GBASE_SR	 = 13,
+	MLX5E_10GBASE_ER	 = 14,
+	MLX5E_40GBASE_SR4	 = 15,
+	MLX5E_40GBASE_LR4	 = 16,
+	MLX5E_50GBASE_SR2	 = 18,
+	MLX5E_100GBASE_CR4	 = 20,
+	MLX5E_100GBASE_SR4	 = 21,
+	MLX5E_100GBASE_KR4	 = 22,
+	MLX5E_100GBASE_LR4	 = 23,
+	MLX5E_100BASE_TX	 = 24,
+	MLX5E_1000BASE_T	 = 25,
+	MLX5E_10GBASE_T		 = 26,
+	MLX5E_25GBASE_CR	 = 27,
+	MLX5E_25GBASE_KR	 = 28,
+	MLX5E_25GBASE_SR	 = 29,
+	MLX5E_50GBASE_CR2	 = 30,
+	MLX5E_50GBASE_KR2	 = 31,
+	MLX5E_LINK_MODES_NUMBER,
+};
+
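+/* Build the PTYS protocol bitmask for a single link mode, e.g.
+ * MLX5E_PROT_MASK(MLX5E_10GBASE_KR) selects 10GBASE-KR.
+ */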
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK         0xF
+
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+			 int ptys_size, int proto_mask, u8 local_port);
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+			      u32 *proto_cap, int proto_mask);
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+				u32 *proto_admin, int proto_mask);
+int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
+				    u8 *link_width_oper, u8 local_port);
+int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
+				  u8 *proto_oper, u8 local_port);
+int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
+				   u32 *proto_oper, u8 local_port);
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+		       u32 proto_admin, int proto_mask);
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+			       enum mlx5_port_status status);
+int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+				 enum mlx5_port_status *status);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+			     u8 *an_status,
+			     u8 *an_disable_cap, u8 *an_disable_admin);
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+			      u8 port);
+
+int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+			      u8 *vl_hw_cap, u8 local_port);
+
+int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+			  u32 *rx_pause, u32 *tx_pause);
+
+int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+			u8 *pfc_en_rx);
+
+int mlx5_max_tc(struct mlx5_core_dev *mdev);
+
+int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+			    u8 prio, u8 *tc);
+int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+				u8 tc, u8 *bw_pct);
+int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+				    u8 *max_bw_value,
+				    u8 *max_bw_unit);
+int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+				   u8 *max_bw_value,
+				   u8 *max_bw_unit);
+int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+			 bool *enabled);
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+			     u16 offset, u16 size, u8 *data);
+
+int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+#endif /* __MLX5_PORT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/qp.c b/drivers/net/ethernet/mellanox/mlx5/lib/qp.c
new file mode 100644
index 0000000..9dcc3b4
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/qp.c
@@ -0,0 +1,540 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include "lib/cmd.h"
+#include "lib/qp.h"
+#include "lib/driver.h"
+#include "lib/transobj.h"
+#include "mlx5.h"
+
+static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
+						 u32 rsn)
+{
+	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_core_rsc_common *common;
+
+	spin_lock(&table->lock);
+
+	common = radix_tree_lookup(&table->tree, rsn);
+	if (common)
+		atomic_inc(&common->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!common) {
+		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
+			       rsn);
+		return NULL;
+	}
+	return common;
+}
+
+void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
+{
+	if (atomic_dec_and_test(&common->refcount))
+		complete(&common->free);
+}
+
+static u64 qp_allowed_event_types(void)
+{
+	u64 mask;
+
+	mask = BIT(MLX5_EVENT_TYPE_PATH_MIG) |
+	       BIT(MLX5_EVENT_TYPE_COMM_EST) |
+	       BIT(MLX5_EVENT_TYPE_SQ_DRAINED) |
+	       BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR) |
+	       BIT(MLX5_EVENT_TYPE_PATH_MIG_FAILED) |
+	       BIT(MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR) |
+	       BIT(MLX5_EVENT_TYPE_WQ_ACCESS_ERROR);
+
+	return mask;
+}
+
+static u64 rq_allowed_event_types(void)
+{
+	u64 mask;
+
+	mask = BIT(MLX5_EVENT_TYPE_SRQ_LAST_WQE) |
+	       BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+
+	return mask;
+}
+
+static u64 sq_allowed_event_types(void)
+{
+	return BIT(MLX5_EVENT_TYPE_WQ_CATAS_ERROR);
+}
+
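+/* Only a subset of async event types is valid for each resource type;
+ * mlx5_rsc_event() drops events that arrive for the wrong type with a
+ * warning.
+ */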
+static bool is_event_type_allowed(int rsc_type, int event_type)
+{
+	switch (rsc_type) {
+	case MLX5_EVENT_QUEUE_TYPE_QP:
+		return BIT(event_type) & qp_allowed_event_types();
+	case MLX5_EVENT_QUEUE_TYPE_RQ:
+		return BIT(event_type) & rq_allowed_event_types();
+	case MLX5_EVENT_QUEUE_TYPE_SQ:
+		return BIT(event_type) & sq_allowed_event_types();
+	default:
+		WARN(1, "Event arrived for unknown resource type\n");
+		return false;
+	}
+}
+
+void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
+{
+	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
+	struct mlx5_core_qp *qp;
+
+	if (!common)
+		return;
+
+	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
+		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
+			       event_type, rsn);
+		goto out;
+	}
+
+	switch (common->res) {
+	case MLX5_RES_QP:
+	case MLX5_RES_RQ:
+	case MLX5_RES_SQ:
+		qp = (struct mlx5_core_qp *)common;
+		qp->event(qp, event_type);
+		break;
+
+	default:
+		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+	}
+
+out:
+	mlx5_core_put_rsc(common);
+}
+
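+/* QPs, RQs and SQs share one radix tree; the key combines the queue
+ * number with the resource type placed above MLX5_USER_INDEX_LEN.
+ */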
+static int create_qprqsq_common(struct mlx5_core_dev *dev,
+				struct mlx5_core_qp *qp,
+				int rsc_type)
+{
+	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	int err;
+
+	qp->common.res = rsc_type;
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree,
+				qp->qpn | (rsc_type << MLX5_USER_INDEX_LEN),
+				qp);
+	spin_unlock_irq(&table->lock);
+	if (err)
+		return err;
+
+	atomic_set(&qp->common.refcount, 1);
+	init_completion(&qp->common.free);
+	qp->pid = current->pid;
+
+	return 0;
+}
+
+static void destroy_qprqsq_common(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *qp)
+{
+	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	unsigned long flags;
+
+	spin_lock_irqsave(&table->lock, flags);
+	radix_tree_delete(&table->tree,
+			  qp->qpn | (qp->common.res << MLX5_USER_INDEX_LEN));
+	spin_unlock_irqrestore(&table->lock, flags);
+	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
+	wait_for_completion(&qp->common.free);
+}
+
+int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+			struct mlx5_core_qp *qp,
+			u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
+	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
+	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
+	int err;
+
+	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err)
+		return err;
+
+	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
+
+	err = create_qprqsq_common(dev, qp, MLX5_RES_QP);
+	if (err)
+		goto err_cmd;
+
+	err = mlx5_debug_qp_add(dev, qp);
+	if (err)
+		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
+			      qp->qpn);
+
+	atomic_inc(&dev->num_qps);
+
+	return 0;
+
+err_cmd:
+	memset(din, 0, sizeof(din));
+	memset(dout, 0, sizeof(dout));
+	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
+	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
+
+int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
+			 struct mlx5_core_qp *qp)
+{
+	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)]   = {0};
+	int err;
+
+	mlx5_debug_qp_remove(dev, qp);
+
+	destroy_qprqsq_common(dev, qp);
+
+	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	atomic_dec(&dev->num_qps);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
+
+struct mbox_info {
+	u32 *in;
+	u32 *out;
+	int inlen;
+	int outlen;
+};
+
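+/* Allocate both mailboxes or neither: kfree(NULL) is a no-op, so a
+ * partial allocation is released safely on failure.
+ */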
+static int mbox_alloc(struct mbox_info *mbox, int inlen, int outlen)
+{
+	mbox->inlen  = inlen;
+	mbox->outlen = outlen;
+	mbox->in = kzalloc(mbox->inlen, GFP_KERNEL);
+	mbox->out = kzalloc(mbox->outlen, GFP_KERNEL);
+	if (!mbox->in || !mbox->out) {
+		kfree(mbox->in);
+		kfree(mbox->out);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void mbox_free(struct mbox_info *mbox)
+{
+	kfree(mbox->in);
+	kfree(mbox->out);
+}
+
+static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
+				u32 opt_param_mask, void *qpc,
+				struct mbox_info *mbox)
+{
+	mbox->out = NULL;
+	mbox->in = NULL;
+
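+/* The helpers below paste the transition name into the mlx5_ifc struct
+ * names, e.g. MBOX_ALLOC(mbox, rst2init_qp) sizes the mailboxes from
+ * rst2init_qp_in/rst2init_qp_out.
+ */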
+#define MBOX_ALLOC(mbox, typ)  \
+	mbox_alloc(mbox, MLX5_ST_SZ_BYTES(typ##_in), MLX5_ST_SZ_BYTES(typ##_out))
+
+#define MOD_QP_IN_SET(typ, in, _opcode, _qpn) \
+	MLX5_SET(typ##_in, in, opcode, _opcode); \
+	MLX5_SET(typ##_in, in, qpn, _qpn)
+
+#define MOD_QP_IN_SET_QPC(typ, in, _opcode, _qpn, _opt_p, _qpc) \
+	MOD_QP_IN_SET(typ, in, _opcode, _qpn); \
+	MLX5_SET(typ##_in, in, opt_param_mask, _opt_p); \
+	memcpy(MLX5_ADDR_OF(typ##_in, in, qpc), _qpc, MLX5_ST_SZ_BYTES(qpc))
+
+	switch (opcode) {
+	/* 2RST & 2ERR */
+	case MLX5_CMD_OP_2RST_QP:
+		if (MBOX_ALLOC(mbox, qp_2rst))
+			return -ENOMEM;
+		MOD_QP_IN_SET(qp_2rst, mbox->in, opcode, qpn);
+		break;
+	case MLX5_CMD_OP_2ERR_QP:
+		if (MBOX_ALLOC(mbox, qp_2err))
+			return -ENOMEM;
+		MOD_QP_IN_SET(qp_2err, mbox->in, opcode, qpn);
+		break;
+
+	/* MODIFY with QPC */
+	case MLX5_CMD_OP_RST2INIT_QP:
+		if (MBOX_ALLOC(mbox, rst2init_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_INIT2RTR_QP:
+		if (MBOX_ALLOC(mbox, init2rtr_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(init2rtr_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_RTR2RTS_QP:
+		if (MBOX_ALLOC(mbox, rtr2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rtr2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_RTS2RTS_QP:
+		if (MBOX_ALLOC(mbox, rts2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(rts2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_SQERR2RTS_QP:
+		if (MBOX_ALLOC(mbox, sqerr2rts_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(sqerr2rts_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	case MLX5_CMD_OP_INIT2INIT_QP:
+		if (MBOX_ALLOC(mbox, init2init_qp))
+			return -ENOMEM;
+		MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
+				  opt_param_mask, qpc);
+		break;
+	default:
+		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
+			      opcode, qpn);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+			u32 opt_param_mask, void *qpc,
+			struct mlx5_core_qp *qp)
+{
+	struct mbox_info mbox;
+	int err;
+
+	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+				   opt_param_mask, qpc, &mbox);
+	if (err)
+		return err;
+
+	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+	mbox_free(&mbox);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
+
+void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_qp_table *table = &dev->priv.qp_table;
+
+	memset(table, 0, sizeof(*table));
+	spin_lock_init(&table->lock);
+	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+	mlx5_qp_debugfs_init(dev);
+}
+
+void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+{
+	mlx5_qp_debugfs_cleanup(dev);
+}
+
+int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+		       u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
+
+	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
+	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
+
+int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)]   = {0};
+	int err;
+
+	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
+
+int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+{
+	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)]   = {0};
+
+	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
+
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *rq)
+{
+	int err;
+	u32 rqn;
+
+	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
+	if (err)
+		return err;
+
+	rq->qpn = rqn;
+	err = create_qprqsq_common(dev, rq, MLX5_RES_RQ);
+	if (err)
+		goto err_destroy_rq;
+
+	return 0;
+
+err_destroy_rq:
+	mlx5_core_destroy_rq(dev, rq->qpn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
+
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *rq)
+{
+	destroy_qprqsq_common(dev, rq);
+	mlx5_core_destroy_rq(dev, rq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
+
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *sq)
+{
+	int err;
+	u32 sqn;
+
+	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
+	if (err)
+		return err;
+
+	sq->qpn = sqn;
+	err = create_qprqsq_common(dev, sq, MLX5_RES_SQ);
+	if (err)
+		goto err_destroy_sq;
+
+	return 0;
+
+err_destroy_sq:
+	mlx5_core_destroy_sq(dev, sq->qpn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
+
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *sq)
+{
+	destroy_qprqsq_common(dev, sq);
+	mlx5_core_destroy_sq(dev, sq->qpn);
+}
+EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
+	int err;
+
+	MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*counter_id = MLX5_GET(alloc_q_counter_out, out,
+				       counter_set_id);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
+
+	MLX5_SET(dealloc_q_counter_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+	MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+			      int reset, void *out, int out_size)
+{
+	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
+
+	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+	MLX5_SET(query_q_counter_in, in, clear, reset);
+	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+				  u32 *out_of_buffer)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+	void *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+	if (!err)
+		*out_of_buffer = MLX5_GET(query_q_counter_out, out,
+					  out_of_buffer);
+
+	kvfree(out);
+	return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/qp.h b/drivers/net/ethernet/mellanox/mlx5/lib/qp.h
new file mode 100644
index 0000000..a161d69
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/qp.h
@@ -0,0 +1,598 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_QP_H
+#define MLX5_QP_H
+
+#include "lib/device.h"
+#include "lib/driver.h"
+
+#define MLX5_INVALID_LKEY	0x100
+#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
+#define MLX5_DIF_SIZE		8
+#define MLX5_STRIDE_BLOCK_OP	0x400
+#define MLX5_CPY_GRD_MASK	0xc0
+#define MLX5_CPY_APP_MASK	0x30
+#define MLX5_CPY_REF_MASK	0x0f
+#define MLX5_BSF_INC_REFTAG	(1 << 6)
+#define MLX5_BSF_INL_VALID	(1 << 15)
+#define MLX5_BSF_REFRESH_DIF	(1 << 14)
+#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
+#define MLX5_BSF_APPTAG_ESCAPE	0x1
+#define MLX5_BSF_APPREF_ESCAPE	0x2
+
+enum mlx5_qp_optpar {
+	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
+	MLX5_QP_OPTPAR_RRE			= 1 << 1,
+	MLX5_QP_OPTPAR_RAE			= 1 << 2,
+	MLX5_QP_OPTPAR_RWE			= 1 << 3,
+	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
+	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
+	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
+	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
+	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
+	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
+	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
+	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
+	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
+	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
+	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
+	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
+	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
+	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
+	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
+};
+
+enum mlx5_qp_state {
+	MLX5_QP_STATE_RST			= 0,
+	MLX5_QP_STATE_INIT			= 1,
+	MLX5_QP_STATE_RTR			= 2,
+	MLX5_QP_STATE_RTS			= 3,
+	MLX5_QP_STATE_SQER			= 4,
+	MLX5_QP_STATE_SQD			= 5,
+	MLX5_QP_STATE_ERR			= 6,
+	MLX5_QP_STATE_SQ_DRAINING		= 7,
+	MLX5_QP_STATE_SUSPENDED			= 9,
+	MLX5_QP_NUM_STATE,
+	MLX5_QP_STATE,
+	MLX5_QP_STATE_BAD,
+};
+
+enum {
+	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
+	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
+	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
+	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
+};
+
+enum {
+	MLX5_QP_ST_RC				= 0x0,
+	MLX5_QP_ST_UC				= 0x1,
+	MLX5_QP_ST_UD				= 0x2,
+	MLX5_QP_ST_XRC				= 0x3,
+	MLX5_QP_ST_MLX				= 0x4,
+	MLX5_QP_ST_DCI				= 0x5,
+	MLX5_QP_ST_DCT				= 0x6,
+	MLX5_QP_ST_QP0				= 0x7,
+	MLX5_QP_ST_QP1				= 0x8,
+	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
+	MLX5_QP_ST_RAW_IPV6			= 0xa,
+	MLX5_QP_ST_SNIFFER			= 0xb,
+	MLX5_QP_ST_REG_UMR			= 0xc,
+	MLX5_QP_ST_PTP_1588			= 0xd,
+	MLX5_QP_ST_SYNC_UMR			= 0xe,
+	MLX5_QP_ST_MAX
+};
+
+enum {
+	MLX5_QP_PM_MIGRATED			= 0x3,
+	MLX5_QP_PM_ARMED			= 0x0,
+	MLX5_QP_PM_REARM			= 0x1
+};
+
+enum {
+	MLX5_NON_ZERO_RQ	= 0x0,
+	MLX5_SRQ_RQ		= 0x1,
+	MLX5_CRQ_RQ		= 0x2,
+	MLX5_ZERO_LEN_RQ	= 0x3
+};
+
+/* TODO REM */
+enum {
+	/* params1 */
+	MLX5_QP_BIT_SRE				= 1 << 15,
+	MLX5_QP_BIT_SWE				= 1 << 14,
+	MLX5_QP_BIT_SAE				= 1 << 13,
+	/* params2 */
+	MLX5_QP_BIT_RRE				= 1 << 15,
+	MLX5_QP_BIT_RWE				= 1 << 14,
+	MLX5_QP_BIT_RAE				= 1 << 13,
+	MLX5_QP_BIT_RIC				= 1 <<	4,
+	MLX5_QP_BIT_CC_SLAVE_RECV		= 1 <<  2,
+	MLX5_QP_BIT_CC_SLAVE_SEND		= 1 <<  1,
+	MLX5_QP_BIT_CC_MASTER			= 1 <<  0
+};
+
+enum {
+	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
+	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
+	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
+};
+
+enum {
+	MLX5_SEND_WQE_DS	= 16,
+	MLX5_SEND_WQE_BB	= 64,
+};
+
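+/* A 64-byte send WQE basic block holds four 16-byte data segments. */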
+#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+
+enum {
+	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
+};
+
+enum {
+	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
+	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
+	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
+	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
+	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
+};
+
+enum {
+	MLX5_FENCE_MODE_NONE			= 0 << 5,
+	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
+	MLX5_FENCE_MODE_FENCE			= 2 << 5,
+	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
+	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
+};
+
+enum {
+	MLX5_RCV_DBR	= 0,
+	MLX5_SND_DBR	= 1,
+};
+
+enum {
+	MLX5_FLAGS_INLINE	= 1<<7,
+	MLX5_FLAGS_CHECK_FREE   = 1<<5,
+};
+
+struct mlx5_wqe_fmr_seg {
+	__be32			flags;
+	__be32			mem_key;
+	__be64			buf_list;
+	__be64			start_addr;
+	__be64			reg_len;
+	__be32			offset;
+	__be32			page_size;
+	u32			reserved[2];
+};
+
+struct mlx5_wqe_ctrl_seg {
+	__be32			opmod_idx_opcode;
+	__be32			qpn_ds;
+	u8			signature;
+	u8			rsvd[2];
+	u8			fm_ce_se;
+	__be32			imm;
+};
+
+#define MLX5_WQE_CTRL_DS_MASK 0x3f
+#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_DS_UNITS 16
+#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
+#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
+#define MLX5_WQE_AV_EXT 0x80000000
+
+enum {
+	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
+	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
+	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
+	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
+};
+
+struct mlx5_wqe_eth_seg {
+	u8              rsvd0[4];
+	u8              cs_flags;
+	u8              rsvd1;
+	__be16          mss;
+	__be32          rsvd2;
+	__be16          inline_hdr_sz;
+	u8              inline_hdr_start[2];
+};
+
+struct mlx5_wqe_xrc_seg {
+	__be32			xrc_srqn;
+	u8			rsvd[12];
+};
+
+struct mlx5_wqe_masked_atomic_seg {
+	__be64			swap_add;
+	__be64			compare;
+	__be64			swap_add_mask;
+	__be64			compare_mask;
+};
+
+struct mlx5_base_av {
+	union {
+		struct {
+			__be32	qkey;
+			__be32	reserved;
+		} qkey;
+		__be64	dc_key;
+	} key;
+	__be32	dqp_dct;
+	u8	stat_rate_sl;
+	u8	fl_mlid;
+	union {
+		__be16	rlid;
+		__be16  udp_sport;
+	};
+};
+
+struct mlx5_av {
+	union {
+		struct {
+			__be32	qkey;
+			__be32	reserved;
+		} qkey;
+		__be64	dc_key;
+	} key;
+	__be32	dqp_dct;
+	u8	stat_rate_sl;
+	u8	fl_mlid;
+	union {
+		__be16	rlid;
+		__be16  udp_sport;
+	};
+	u8	reserved0[4];
+	u8	rmac[6];
+	u8	tclass;
+	u8	hop_limit;
+	__be32	grh_gid_fl;
+	u8	rgid[16];
+};
+
+struct mlx5_wqe_datagram_seg {
+	struct mlx5_av	av;
+};
+
+struct mlx5_wqe_raddr_seg {
+	__be64			raddr;
+	__be32			rkey;
+	u32			reserved;
+};
+
+struct mlx5_wqe_atomic_seg {
+	__be64			swap_add;
+	__be64			compare;
+};
+
+struct mlx5_wqe_data_seg {
+	__be32			byte_count;
+	__be32			lkey;
+	__be64			addr;
+};
+
+struct mlx5_wqe_umr_ctrl_seg {
+	u8		flags;
+	u8		rsvd0[3];
+	__be16		xlt_octowords;
+	union {
+		__be16	xlt_offset;
+		__be16	bsf_octowords;
+	};
+	__be64		mkey_mask;
+	__be32		xlt_offset_47_16;
+	u8		rsvd1[28];
+};
+
+struct mlx5_seg_set_psv {
+	__be32		psv_num;
+	__be16		syndrome;
+	__be16		status;
+	__be32		transient_sig;
+	__be32		ref_tag;
+};
+
+struct mlx5_seg_get_psv {
+	u8		rsvd[19];
+	u8		num_psv;
+	__be32		l_key;
+	__be64		va;
+	__be32		psv_index[4];
+};
+
+struct mlx5_seg_check_psv {
+	u8		rsvd0[2];
+	__be16		err_coalescing_op;
+	u8		rsvd1[2];
+	__be16		xport_err_op;
+	u8		rsvd2[2];
+	__be16		xport_err_mask;
+	u8		rsvd3[7];
+	u8		num_psv;
+	__be32		l_key;
+	__be64		va;
+	__be32		psv_index[4];
+};
+
+struct mlx5_rwqe_sig {
+	u8	rsvd0[4];
+	u8	signature;
+	u8	rsvd1[11];
+};
+
+struct mlx5_wqe_signature_seg {
+	u8	rsvd0[4];
+	u8	signature;
+	u8	rsvd1[11];
+};
+
+#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
+
+struct mlx5_wqe_inline_seg {
+	__be32	byte_count;
+};
+
+enum mlx5_sig_type {
+	MLX5_DIF_CRC = 0x1,
+	MLX5_DIF_IPCS = 0x2,
+};
+
+struct mlx5_bsf_inl {
+	__be16		vld_refresh;
+	__be16		dif_apptag;
+	__be32		dif_reftag;
+	u8		sig_type;
+	u8		rp_inv_seed;
+	u8		rsvd[3];
+	u8		dif_inc_ref_guard_check;
+	__be16		dif_app_bitmask_check;
+};
+
+struct mlx5_bsf {
+	struct mlx5_bsf_basic {
+		u8		bsf_size_sbs;
+		u8		check_byte_mask;
+		union {
+			u8	copy_byte_mask;
+			u8	bs_selector;
+			u8	rsvd_wflags;
+		} wire;
+		union {
+			u8	bs_selector;
+			u8	rsvd_mflags;
+		} mem;
+		__be32		raw_data_size;
+		__be32		w_bfs_psv;
+		__be32		m_bfs_psv;
+	} basic;
+	struct mlx5_bsf_ext {
+		__be32		t_init_gen_pro_size;
+		__be32		rsvd_epi_size;
+		__be32		w_tfs_psv;
+		__be32		m_tfs_psv;
+	} ext;
+	struct mlx5_bsf_inl	w_inl;
+	struct mlx5_bsf_inl	m_inl;
+};
+
+struct mlx5_mtt {
+	__be64		ptag;
+};
+
+struct mlx5_klm {
+	__be32		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_entry {
+	__be16		stride;
+	__be16		bcount;
+	__be32		key;
+	__be64		va;
+};
+
+struct mlx5_stride_block_ctrl_seg {
+	__be32		bcount_per_cycle;
+	__be32		op;
+	__be32		repeat_count;
+	u16		rsvd;
+	__be16		num_entries;
+};
+
+struct mlx5_core_qp {
+	struct mlx5_core_rsc_common	common; /* must be first */
+	void (*event)		(struct mlx5_core_qp *, int);
+	int			qpn;
+	struct mlx5_rsc_debug	*dbg;
+	int			pid;
+};
+
+struct mlx5_qp_path {
+	u8			fl_free_ar;
+	u8			rsvd3;
+	__be16			pkey_index;
+	u8			rsvd0;
+	u8			grh_mlid;
+	__be16			rlid;
+	u8			ackto_lt;
+	u8			mgid_index;
+	u8			static_rate;
+	u8			hop_limit;
+	__be32			tclass_flowlabel;
+	union {
+		u8		rgid[16];
+		u8		rip[16];
+	};
+	u8			f_dscp_ecn_prio;
+	u8			ecn_dscp;
+	__be16			udp_sport;
+	u8			dci_cfi_prio_sl;
+	u8			port;
+	u8			rmac[6];
+};
+
+/* FIXME: use mlx5_ifc.h qpc */
+struct mlx5_qp_context {
+	__be32			flags;
+	__be32			flags_pd;
+	u8			mtu_msgmax;
+	u8			rq_size_stride;
+	__be16			sq_crq_size;
+	__be32			qp_counter_set_usr_page;
+	__be32			wire_qpn;
+	__be32			log_pg_sz_remote_qpn;
+	struct			mlx5_qp_path pri_path;
+	struct			mlx5_qp_path alt_path;
+	__be32			params1;
+	u8			reserved2[4];
+	__be32			next_send_psn;
+	__be32			cqn_send;
+	__be32			deth_sqpn;
+	u8			reserved3[4];
+	__be32			last_acked_psn;
+	__be32			ssn;
+	__be32			params2;
+	__be32			rnr_nextrecvpsn;
+	__be32			xrcd;
+	__be32			cqn_recv;
+	__be64			db_rec_addr;
+	__be32			qkey;
+	__be32			rq_type_srqn;
+	__be32			rmsn;
+	__be16			hw_sq_wqe_counter;
+	__be16			sw_sq_wqe_counter;
+	__be16			hw_rcyclic_byte_counter;
+	__be16			hw_rq_counter;
+	__be16			sw_rcyclic_byte_counter;
+	__be16			sw_rq_counter;
+	u8			rsvd0[5];
+	u8			cgs;
+	u8			cs_req;
+	u8			cs_res;
+	__be64			dc_access_key;
+	u8			rsvd1[24];
+};
+
+static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
+{
+	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
+}
+
+static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
+{
+	return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
+}
+
+int mlx5_core_create_qp(struct mlx5_core_dev *dev,
+			struct mlx5_core_qp *qp,
+			u32 *in,
+			int inlen);
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
+			u32 opt_param_mask, void *qpc,
+			struct mlx5_core_qp *qp);
+int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
+			 struct mlx5_core_qp *qp);
+int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+		       u32 *out, int outlen);
+
+int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
+int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
+void mlx5_init_qp_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
+int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *rq);
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+				struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+				  struct mlx5_core_qp *sq);
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+			      int reset, void *out, int out_size);
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+				  u32 *out_of_buffer);
+
+static inline const char *mlx5_qp_type_str(int type)
+{
+	switch (type) {
+	case MLX5_QP_ST_RC: return "RC";
+	case MLX5_QP_ST_UC: return "UC";
+	case MLX5_QP_ST_UD: return "UD";
+	case MLX5_QP_ST_XRC: return "XRC";
+	case MLX5_QP_ST_MLX: return "MLX";
+	case MLX5_QP_ST_QP0: return "QP0";
+	case MLX5_QP_ST_QP1: return "QP1";
+	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
+	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
+	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
+	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
+	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
+	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
+	default: return "Invalid transport type";
+	}
+}
+
+static inline const char *mlx5_qp_state_str(int state)
+{
+	switch (state) {
+	case MLX5_QP_STATE_RST: return "RST";
+	case MLX5_QP_STATE_INIT: return "INIT";
+	case MLX5_QP_STATE_RTR: return "RTR";
+	case MLX5_QP_STATE_RTS: return "RTS";
+	case MLX5_QP_STATE_SQER: return "SQER";
+	case MLX5_QP_STATE_SQD: return "SQD";
+	case MLX5_QP_STATE_ERR: return "ERR";
+	case MLX5_QP_STATE_SQ_DRAINING: return "SQ_DRAINING";
+	case MLX5_QP_STATE_SUSPENDED: return "SUSPENDED";
+	default: return "Invalid QP state";
+	}
+}
+
+#endif /* MLX5_QP_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/rl.c b/drivers/net/ethernet/mellanox/mlx5/lib/rl.c
new file mode 100644
index 0000000..796c7e6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/rl.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
+/* Scheduling element fw management */
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 *element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(create_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(create_scheduling_element_out)] = {0};
+	void *schedc;
+	int err;
+
+	schedc = MLX5_ADDR_OF(create_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(create_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT);
+	MLX5_SET(create_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*element_id = MLX5_GET(create_scheduling_element_out, out,
+			       scheduling_element_id);
+	return 0;
+}
+
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *ctx, u32 element_id,
+				       u32 modify_bitmask)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_scheduling_element_out)] = {0};
+	void *schedc;
+
+	schedc = MLX5_ADDR_OF(modify_scheduling_element_in, in,
+			      scheduling_context);
+	MLX5_SET(modify_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(modify_scheduling_element_in, in, modify_bitmask,
+		 modify_bitmask);
+	MLX5_SET(modify_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+	memcpy(schedc, ctx, MLX5_ST_SZ_BYTES(scheduling_context));
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_scheduling_element_in)]  = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_scheduling_element_out)] = {0};
+
+	MLX5_SET(destroy_scheduling_element_in, in, opcode,
+		 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_element_id,
+		 element_id);
+	MLX5_SET(destroy_scheduling_element_in, in, scheduling_hierarchy,
+		 hierarchy);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Find an entry where the given rate can be registered.
+ * If the rate already exists, return the entry where it is registered;
+ * otherwise return the first available entry.
+ * If the table is full, return NULL.
+ */
+static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
+					   u32 rate)
+{
+	struct mlx5_rl_entry *ret_entry = NULL;
+	bool empty_found = false;
+	int i;
+
+	for (i = 0; i < table->max_size; i++) {
+		if (table->rl_entry[i].rate == rate)
+			return &table->rl_entry[i];
+		if (!empty_found && !table->rl_entry[i].rate) {
+			empty_found = true;
+			ret_entry = &table->rl_entry[i];
+		}
+	}
+
+	return ret_entry;
+}
+
+static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+				   u32 rate, u16 index)
+{
+	u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
+
+	MLX5_SET(set_rate_limit_in, in, opcode,
+		 MLX5_CMD_OP_SET_RATE_LIMIT);
+	MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
+	MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+
+	return (rate <= table->max_rate && rate >= table->min_rate);
+}
+EXPORT_SYMBOL(mlx5_rl_is_in_range);
+
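+/* Rates are reference counted: adding an existing rate only bumps its
+ * refcount, so every mlx5_rl_add_rate() must be paired with an
+ * mlx5_rl_remove_rate() for the same rate.
+ */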
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+	struct mlx5_rl_entry *entry;
+	int err = 0;
+
+	mutex_lock(&table->rl_lock);
+
+	if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
+		mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
+			      rate, table->min_rate, table->max_rate);
+		err = -EINVAL;
+		goto out;
+	}
+
+	entry = find_rl_entry(table, rate);
+	if (!entry) {
+		mlx5_core_err(dev, "Max number of %u rates reached\n",
+			      table->max_size);
+		err = -ENOSPC;
+		goto out;
+	}
+	if (entry->refcount) {
+		/* rate already configured */
+		entry->refcount++;
+	} else {
+		/* new rate limit */
+		err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+		if (err) {
+			mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
+				      rate, err);
+			goto out;
+		}
+		entry->rate = rate;
+		entry->refcount = 1;
+	}
+	*index = entry->index;
+
+out:
+	mutex_unlock(&table->rl_lock);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_rl_add_rate);
+
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+	struct mlx5_rl_entry *entry = NULL;
+
+	/* 0 is a reserved value for unlimited rate */
+	if (rate == 0)
+		return;
+
+	mutex_lock(&table->rl_lock);
+	entry = find_rl_entry(table, rate);
+	if (!entry || !entry->refcount) {
+		mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
+		goto out;
+	}
+
+	entry->refcount--;
+	if (!entry->refcount) {
+		/* need to remove rate */
+		mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+		entry->rate = 0;
+	}
+
+out:
+	mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate);
+
+int mlx5_init_rl_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+	int i;
+
+	mutex_init(&table->rl_lock);
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
+		table->max_size = 0;
+		return 0;
+	}
+
+	/* First entry is reserved for unlimited rate */
+	table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
+	table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
+	table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
+
+	table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+				  GFP_KERNEL);
+	if (!table->rl_entry)
+		return -ENOMEM;
+
+	/* The index is the entry's position in the HW rate-limit table;
+	 * index 0 is reserved for the unlimited rate.
+	 */
+	for (i = 0; i < table->max_size; i++)
+		table->rl_entry[i].index = i + 1;
+
+	/* Index 0 is reserved */
+	mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
+		       table->max_size,
+		       table->min_rate >> 10,
+		       table->max_rate >> 10);
+
+	return 0;
+}
+
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_rl_table *table = &dev->priv.rl_table;
+	int i;
+
+	/* Clear all configured rates */
+	for (i = 0; i < table->max_size; i++)
+		if (table->rl_entry[i].rate)
+			mlx5_set_rate_limit_cmd(dev, 0,
+						table->rl_entry[i].index);
+
+	kfree(dev->priv.rl_table.rl_entry);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/srq.c b/drivers/net/ethernet/mellanox/mlx5/lib/srq.c
new file mode 100644
index 0000000..5be9bc5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/srq.c
@@ -0,0 +1,562 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "lib/srq.h"
+#include <rdma/ib_verbs.h>
+#include "mlx5.h"
+#include "lib/transobj.h"
+
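+/* Dispatch an async event to its SRQ: look the SRQ up by number under
+ * the table lock, hold a reference across the event callback, and
+ * complete the free waiter when the last reference is dropped.
+ */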
+void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
+{
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+	struct mlx5_core_srq *srq;
+
+	spin_lock(&table->lock);
+
+	srq = radix_tree_lookup(&table->tree, srqn);
+	if (srq)
+		atomic_inc(&srq->refcount);
+
+	spin_unlock(&table->lock);
+
+	if (!srq) {
+		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
+		return;
+	}
+
+	srq->event(srq, event_type);
+
+	if (atomic_dec_and_test(&srq->refcount))
+		complete(&srq->free);
+}
+
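+/* Size in bytes of the array of page addresses (PAS) needed to cover
+ * the receive queue, including the page offset quanta.
+ */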
+static int get_pas_size(struct mlx5_srq_attr *in)
+{
+	u32 log_page_size = in->log_page_size + 12;
+	u32 log_srq_size  = in->log_size;
+	u32 log_rq_stride = in->wqe_shift;
+	u32 page_offset   = in->page_offset;
+	u32 po_quanta	  = 1 << (log_page_size - 6);
+	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
+	u32 page_size	  = 1 << log_page_size;
+	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
+	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
+
+	return rq_num_pas * sizeof(u64);
+}
+
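+/* Translate the software SRQ attributes into the firmware WQ layout */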
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
+{
+	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
+		 & MLX5_SRQ_FLAG_WQ_SIG));
+	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
+	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
+	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
+	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
+	MLX5_SET(wq,   wq, lwm,		  in->lwm);
+	MLX5_SET(wq,   wq, pd,		  in->pd);
+	MLX5_SET64(wq, wq, dbr_addr,	  in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
+		 & MLX5_SRQ_FLAG_WQ_SIG));
+	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
+	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
+	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
+	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
+	MLX5_SET(srqc,	 srqc, lwm,	      in->lwm);
+	MLX5_SET(srqc,	 srqc, pd,	      in->pd);
+	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
+	MLX5_SET(srqc,	 srqc, xrcd,	      in->xrcd);
+	MLX5_SET(srqc,	 srqc, cqn,	      in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+	if (MLX5_GET(wq, wq, wq_signature))
+		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
+	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
+	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
+	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
+	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
+	in->lwm		  = MLX5_GET(wq,   wq, lwm);
+	in->pd		  = MLX5_GET(wq,   wq, pd);
+	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+	if (MLX5_GET(srqc, srqc, wq_signature))
+		in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
+	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
+	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
+	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
+	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
+	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
+	in->pd		  = MLX5_GET(srqc,   srqc, pd);
+	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
+}
+
+struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
+{
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+	struct mlx5_core_srq *srq;
+
+	spin_lock(&table->lock);
+
+	srq = radix_tree_lookup(&table->tree, srqn);
+	if (srq)
+		atomic_inc(&srq->refcount);
+
+	spin_unlock(&table->lock);
+
+	return srq;
+}
+EXPORT_SYMBOL(mlx5_core_get_srq);
+
+static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			  struct mlx5_srq_attr *in)
+{
+	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+	void *create_in;
+	void *srqc;
+	void *pas;
+	int pas_size;
+	int inlen;
+	int err;
+
+	pas_size  = get_pas_size(in);
+	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+	create_in = mlx5_vzalloc(inlen);
+	if (!create_in)
+		return -ENOMEM;
+
+	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
+
+	set_srqc(srqc, in);
+	memcpy(pas, in->pas, pas_size);
+
+	MLX5_SET(create_srq_in, create_in, opcode,
+		 MLX5_CMD_OP_CREATE_SRQ);
+
+	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+			    sizeof(create_out));
+	kvfree(create_in);
+	if (!err)
+		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
+
+	return err;
+}
+
+static int destroy_srq_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq)
+{
+	u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
+	u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
+
+	MLX5_SET(destroy_srq_in, srq_in, opcode,
+		 MLX5_CMD_OP_DESTROY_SRQ);
+	MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
+
+	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+			     srq_out, sizeof(srq_out));
+}
+
+static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+		       u16 lwm, int is_srq)
+{
+	/* The arm_srq structs are missing, so use the identical XRC ones */
+	u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+	u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+
+	MLX5_SET(arm_xrc_srq_in, srq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
+	MLX5_SET(arm_xrc_srq_in, srq_in, lwm,      lwm);
+
+	return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+			     srq_out, sizeof(srq_out));
+}
+
+static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_srq_attr *out)
+{
+	u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+	u32 *srq_out;
+	void *srqc;
+	int err;
+
+	srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+	if (!srq_out)
+		return -ENOMEM;
+
+	MLX5_SET(query_srq_in, srq_in, opcode,
+		 MLX5_CMD_OP_QUERY_SRQ);
+	MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
+	err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
+			    srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
+	if (err)
+		goto out;
+
+	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+	get_srqc(srqc, out);
+	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+		out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+	kvfree(srq_out);
+	return err;
+}
+
+static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			      struct mlx5_core_srq *srq,
+			      struct mlx5_srq_attr *in)
+{
+	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
+	void *create_in;
+	void *xrc_srqc;
+	void *pas;
+	int pas_size;
+	int inlen;
+	int err;
+
+	pas_size  = get_pas_size(in);
+	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
+	create_in = mlx5_vzalloc(inlen);
+	if (!create_in)
+		return -ENOMEM;
+
+	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
+				xrc_srq_context_entry);
+	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
+
+	set_srqc(xrc_srqc, in);
+	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
+	memcpy(pas, in->pas, pas_size);
+	MLX5_SET(create_xrc_srq_in, create_in, opcode,
+		 MLX5_CMD_OP_CREATE_XRC_SRQ);
+
+	err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
+			    sizeof(create_out));
+	if (err)
+		goto out;
+
+	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
+out:
+	kvfree(create_in);
+	return err;
+}
+
+static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			       struct mlx5_core_srq *srq)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
+
+	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
+		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
+	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+			     xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq, u16 lwm)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
+	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,   MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm,      lwm);
+
+	return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
+			     xrcsrq_out, sizeof(xrcsrq_out));
+}
+
+static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
+			     struct mlx5_core_srq *srq,
+			     struct mlx5_srq_attr *out)
+{
+	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
+	u32 *xrcsrq_out;
+	void *xrc_srqc;
+	int err;
+
+	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (!xrcsrq_out)
+		return -ENOMEM;
+
+	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
+		 MLX5_CMD_OP_QUERY_XRC_SRQ);
+	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
+
+	err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
+			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (err)
+		goto out;
+
+	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
+				xrc_srq_context_entry);
+	get_srqc(xrc_srqc, out);
+	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+		out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+	kvfree(xrcsrq_out);
+	return err;
+}
+
+static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			  struct mlx5_srq_attr *in)
+{
+	void *create_in;
+	void *rmpc;
+	void *wq;
+	int pas_size;
+	int inlen;
+	int err;
+
+	pas_size = get_pas_size(in);
+	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
+	create_in = mlx5_vzalloc(inlen);
+	if (!create_in)
+		return -ENOMEM;
+
+	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+
+	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+	set_wq(wq, in);
+	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
+
+	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
+
+	kvfree(create_in);
+	return err;
+}
+
+static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
+			   struct mlx5_core_srq *srq)
+{
+	return mlx5_core_destroy_rmp(dev, srq->srqn);
+}
+
+static int arm_rmp_cmd(struct mlx5_core_dev *dev,
+		       struct mlx5_core_srq *srq,
+		       u16 lwm)
+{
+	void *in;
+	void *rmpc;
+	void *wq;
+	void *bitmask;
+	int err;
+
+	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+	if (!in)
+		return -ENOMEM;
+
+	rmpc =	  MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+	wq   =	  MLX5_ADDR_OF(rmpc,	        rmpc, wq);
+
+	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
+	MLX5_SET(modify_rmp_in, in,	 rmpn,      srq->srqn);
+	MLX5_SET(wq,		wq,	 lwm,	    lwm);
+	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
+	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+
+	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+	kvfree(in);
+	return err;
+}
+
+static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_srq_attr *out)
+{
+	u32 *rmp_out;
+	void *rmpc;
+	int err;
+
+	rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
+	if (!rmp_out)
+		return -ENOMEM;
+
+	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
+	if (err)
+		goto out;
+
+	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
+	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+		out->flags |= MLX5_SRQ_FLAG_ERR;
+
+out:
+	kvfree(rmp_out);
+	return err;
+}
+
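+/* Pre-ISSI devices use the native SRQ command; with ISSI, XRC SRQs keep
+ * a dedicated command while plain SRQs are backed by RMPs.
+ */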
+static int create_srq_split(struct mlx5_core_dev *dev,
+			    struct mlx5_core_srq *srq,
+			    struct mlx5_srq_attr *in)
+{
+	if (!dev->issi)
+		return create_srq_cmd(dev, srq, in);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return create_xrc_srq_cmd(dev, srq, in);
+	else
+		return create_rmp_cmd(dev, srq, in);
+}
+
+static int destroy_srq_split(struct mlx5_core_dev *dev,
+			     struct mlx5_core_srq *srq)
+{
+	if (!dev->issi)
+		return destroy_srq_cmd(dev, srq);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return destroy_xrc_srq_cmd(dev, srq);
+	else
+		return destroy_rmp_cmd(dev, srq);
+}
+
+int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			 struct mlx5_srq_attr *in)
+{
+	int err;
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+	if (in->type == IB_SRQT_XRC)
+		srq->common.res = MLX5_RES_XSRQ;
+	else
+		srq->common.res = MLX5_RES_SRQ;
+
+	err = create_srq_split(dev, srq, in);
+	if (err)
+		return err;
+
+	atomic_set(&srq->refcount, 1);
+	init_completion(&srq->free);
+
+	spin_lock_irq(&table->lock);
+	err = radix_tree_insert(&table->tree, srq->srqn, srq);
+	spin_unlock_irq(&table->lock);
+	if (err) {
+		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
+		goto err_destroy_srq_split;
+	}
+
+	return 0;
+
+err_destroy_srq_split:
+	destroy_srq_split(dev, srq);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_srq);
+
+int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
+{
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+	struct mlx5_core_srq *tmp;
+	int err;
+
+	spin_lock_irq(&table->lock);
+	tmp = radix_tree_delete(&table->tree, srq->srqn);
+	spin_unlock_irq(&table->lock);
+	if (!tmp) {
+		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
+		return -EINVAL;
+	}
+	if (tmp != srq) {
+		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
+		return -EINVAL;
+	}
+
+	err = destroy_srq_split(dev, srq);
+	if (err)
+		return err;
+
+	if (atomic_dec_and_test(&srq->refcount))
+		complete(&srq->free);
+	wait_for_completion(&srq->free);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_core_destroy_srq);
+
+int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+			struct mlx5_srq_attr *out)
+{
+	if (!dev->issi)
+		return query_srq_cmd(dev, srq, out);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return query_xrc_srq_cmd(dev, srq, out);
+	else
+		return query_rmp_cmd(dev, srq, out);
+}
+EXPORT_SYMBOL(mlx5_core_query_srq);
+
+int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
+		      u16 lwm, int is_srq)
+{
+	if (!dev->issi)
+		return arm_srq_cmd(dev, srq, lwm, is_srq);
+	else if (srq->common.res == MLX5_RES_XSRQ)
+		return arm_xrc_srq_cmd(dev, srq, lwm);
+	else
+		return arm_rmp_cmd(dev, srq, lwm);
+}
+EXPORT_SYMBOL(mlx5_core_arm_srq);
+
+void mlx5_init_srq_table(struct mlx5_core_dev *dev)
+{
+	struct mlx5_srq_table *table = &dev->priv.srq_table;
+
+	memset(table, 0, sizeof(*table));
+	spin_lock_init(&table->lock);
+	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
+}
+
+void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
+{
+	/* nothing */
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/srq.h b/drivers/net/ethernet/mellanox/mlx5/lib/srq.h
new file mode 100644
index 0000000..8bbc517
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/srq.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef MLX5_SRQ_H
+#define MLX5_SRQ_H
+
+#include "lib/driver.h"
+
+enum {
+	MLX5_SRQ_FLAG_ERR    = (1 << 0),
+	MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
+};
+
+struct mlx5_srq_attr {
+	u32 type;
+	u32 flags;
+	u32 log_size;
+	u32 wqe_shift;
+	u32 log_page_size;
+	u32 wqe_cnt;
+	u32 srqn;
+	u32 xrcd;
+	u32 page_offset;
+	u32 cqn;
+	u32 pd;
+	u32 lwm;
+	u32 user_index;
+	u64 db_record;
+	__be64 *pas;
+};
+
+struct mlx5_core_dev;
+
+void mlx5_init_srq_table(struct mlx5_core_dev *dev);
+void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
+
+#endif /* MLX5_SRQ_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/transobj.c b/drivers/net/ethernet/mellanox/mlx5/lib/transobj.c
new file mode 100644
index 0000000..e7ebe20
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/transobj.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "lib/driver.h"
+#include "mlx5.h"
+#include "lib/transobj.h"
+
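+/* Thin wrappers around the firmware commands that create, modify, query
+ * and destroy transport objects (RQ, SQ, TIR, TIS, RMP, XRC SRQ, RQT).
+ */
+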
+int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+	int err;
+
+	MLX5_SET(alloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*tdn = MLX5_GET(alloc_transport_domain_out, out,
+				transport_domain);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_alloc_transport_domain);
+
+void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn)
+{
+	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+
+	MLX5_SET(dealloc_transport_domain_in, in, opcode,
+		 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_dealloc_transport_domain);
+
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rq_out)] = {0};
+	int err;
+
+	MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqn = MLX5_GET(create_rq_out, out, rqn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_rq);
+
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rq_out)] = {0};
+
+	MLX5_SET(modify_rq_in, in, rqn, rqn);
+	MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_modify_rq);
+
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {0};
+
+	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+	MLX5_SET(destroy_rq_in, in, rqn, rqn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_rq);
+
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_rq_in)] = {0};
+	int outlen = MLX5_ST_SZ_BYTES(query_rq_out);
+
+	MLX5_SET(query_rq_in, in, opcode, MLX5_CMD_OP_QUERY_RQ);
+	MLX5_SET(query_rq_in, in, rqn, rqn);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_query_rq);
+
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {0};
+	int err;
+
+	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*sqn = MLX5_GET(create_sq_out, out, sqn);
+
+	return err;
+}
+
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_sq_out)] = {0};
+
+	MLX5_SET(modify_sq_in, in, sqn, sqn);
+	MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_modify_sq);
+
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {0};
+
+	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+	MLX5_SET(destroy_sq_in, in, sqn, sqn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_sq_in)] = {0};
+	int outlen = MLX5_ST_SZ_BYTES(query_sq_out);
+
+	MLX5_SET(query_sq_in, in, opcode, MLX5_CMD_OP_QUERY_SQ);
+	MLX5_SET(query_sq_in, in, sqn, sqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+EXPORT_SYMBOL(mlx5_core_query_sq);
+
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tirn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+	int err;
+
+	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*tirn = MLX5_GET(create_tir_out, out, tirn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_tir);
+
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_tir_out)] = {0};
+
+	MLX5_SET(modify_tir_in, in, tirn, tirn);
+	MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {0};
+
+	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+	MLX5_SET(destroy_tir_in, in, tirn, tirn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_tir);
+
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tisn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {0};
+	int err;
+
+	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*tisn = MLX5_GET(create_tis_out, out, tisn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_tis);
+
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_tis_out)] = {0};
+
+	MLX5_SET(modify_tis_in, in, tisn, tisn);
+	MLX5_SET(modify_tis_in, in, opcode, MLX5_CMD_OP_MODIFY_TIS);
+
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_modify_tis);
+
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+
+	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+	MLX5_SET(destroy_tis_in, in, tisn, tisn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_tis);
+
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rmpn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0};
+	int err;
+
+	MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rmpn = MLX5_GET(create_rmp_out, out, rmpn);
+
+	return err;
+}
+
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0};
+
+	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0};
+
+	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
+	MLX5_SET(destroy_rmp_in, in, rmpn, rmpn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0};
+	int outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
+
+	MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP);
+	MLX5_SET(query_rmp_in, in, rmpn,   rmpn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm)
+{
+	void *in;
+	void *rmpc;
+	void *wq;
+	void *bitmask;
+	int  err;
+
+	in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
+	if (!in)
+		return -ENOMEM;
+
+	rmpc    = MLX5_ADDR_OF(modify_rmp_in,   in,   ctx);
+	bitmask = MLX5_ADDR_OF(modify_rmp_in,   in,   bitmask);
+	wq      = MLX5_ADDR_OF(rmpc,	        rmpc, wq);
+
+	MLX5_SET(modify_rmp_in, in,	 rmp_state, MLX5_RMPC_STATE_RDY);
+	MLX5_SET(modify_rmp_in, in,	 rmpn,      rmpn);
+	MLX5_SET(wq,		wq,	 lwm,	    lwm);
+	MLX5_SET(rmp_bitmask,	bitmask, lwm,	    1);
+	MLX5_SET(rmpc,		rmpc,	 state,	    MLX5_RMPC_STATE_RDY);
+
+	err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));
+
+	kvfree(in);
+
+	return err;
+}
+
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			  u32 *xsrqn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0};
+	int err;
+
+	MLX5_SET(create_xrc_srq_in, in, opcode,     MLX5_CMD_OP_CREATE_XRC_SRQ);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn);
+
+	return err;
+}
+
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};
+
+	MLX5_SET(destroy_xrc_srq_in, in, opcode,   MLX5_CMD_OP_DESTROY_XRC_SRQ);
+	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out)
+{
+	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {0};
+	void *srqc;
+	void *xrc_srqc;
+	int err;
+
+	MLX5_SET(query_xrc_srq_in, in, opcode,   MLX5_CMD_OP_QUERY_XRC_SRQ);
+	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, xsrqn);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out,
+			    MLX5_ST_SZ_BYTES(query_xrc_srq_out));
+	if (!err) {
+		xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, out,
+					xrc_srq_context_entry);
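+		/* Present the XRC SRQ context at the plain SRQ context
+		 * offset so callers can parse a single layout.
+		 */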
+		srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
+		memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+	}
+
+	return err;
+}
+
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm)
+{
+	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
+
+	MLX5_SET(arm_xrc_srq_in, in, opcode,   MLX5_CMD_OP_ARM_XRC_SRQ);
+	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn);
+	MLX5_SET(arm_xrc_srq_in, in, lwm,      lwm);
+	MLX5_SET(arm_xrc_srq_in, in, op_mod,
+		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn)
+{
+	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {0};
+	int err;
+
+	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (!err)
+		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_core_create_rqt);
+
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_rqt_out)] = {0};
+
+	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
+	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {0};
+
+	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/transobj.h b/drivers/net/ethernet/mellanox/mlx5/lib/transobj.h
new file mode 100644
index 0000000..3cf24bc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/transobj.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+#include "lib/driver.h"
+
+int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			  u32 *xsrqn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm);
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+			 u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+			 int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/uar.c b/drivers/net/ethernet/mellanox/mlx5/lib/uar.c
new file mode 100644
index 0000000..1e4a839
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/uar.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io-mapping.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
+int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
+{
+	u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(alloc_uar_in)]   = {0};
+	int err;
+
+	MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (!err)
+		*uarn = MLX5_GET(alloc_uar_out, out, uar);
+	return err;
+}
+EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
+
+int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
+{
+	u32 out[MLX5_ST_SZ_DW(dealloc_uar_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)]   = {0};
+
+	MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+	MLX5_SET(dealloc_uar_in, in, uar, uarn);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL(mlx5_cmd_free_uar);
+
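+/* With the 4K UAR capability the device packs several hardware UARs
+ * into each system page; otherwise a system page backs a single UAR.
+ */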
+static int uars_per_sys_page(struct mlx5_core_dev *mdev)
+{
+	if (MLX5_CAP_GEN(mdev, uar_4k))
+		return MLX5_CAP_GEN(mdev, num_of_uars_per_page);
+
+	return 1;
+}
+
+static u64 uar2pfn(struct mlx5_core_dev *mdev, u32 index)
+{
+	u32 system_page_index;
+
+	if (MLX5_CAP_GEN(mdev, uar_4k))
+		system_page_index = index >> (PAGE_SHIFT - MLX5_ADAPTER_PAGE_SHIFT);
+	else
+		system_page_index = index;
+
+	return (pci_resource_start(mdev->pdev, 0) >> PAGE_SHIFT) + system_page_index;
+}
+
+static void up_rel_func(struct kref *kref)
+{
+	struct mlx5_uars_page *up = container_of(kref, struct mlx5_uars_page, ref_count);
+
+	list_del(&up->list);
+	if (mlx5_cmd_free_uar(up->mdev, up->index))
+		mlx5_core_warn(up->mdev, "failed to free uar index %d\n", up->index);
+	kfree(up->reg_bitmap);
+	kfree(up->fp_bitmap);
+	kfree(up);
+}
+
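+/* Allocate one system page worth of UARs together with the bitmaps
+ * that track its free regular and fast-path blue flame registers.
+ */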
+static struct mlx5_uars_page *alloc_uars_page(struct mlx5_core_dev *mdev,
+					      bool map_wc)
+{
+	struct mlx5_uars_page *up;
+	int err = -ENOMEM;
+	phys_addr_t pfn;
+	int bfregs;
+	int i;
+
+	bfregs = uars_per_sys_page(mdev) * MLX5_BFREGS_PER_UAR;
+	up = kzalloc(sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return ERR_PTR(err);
+
+	up->mdev = mdev;
+	up->reg_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+	if (!up->reg_bitmap)
+		goto error1;
+
+	up->fp_bitmap = kcalloc(BITS_TO_LONGS(bfregs), sizeof(unsigned long), GFP_KERNEL);
+	if (!up->fp_bitmap)
+		goto error1;
+
+	for (i = 0; i < bfregs; i++)
+		if ((i % MLX5_BFREGS_PER_UAR) < MLX5_NON_FP_BFREGS_PER_UAR)
+			set_bit(i, up->reg_bitmap);
+		else
+			set_bit(i, up->fp_bitmap);
+
+	up->bfregs = bfregs;
+	up->fp_avail = bfregs * MLX5_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+	up->reg_avail = bfregs * MLX5_NON_FP_BFREGS_PER_UAR / MLX5_BFREGS_PER_UAR;
+
+	err = mlx5_cmd_alloc_uar(mdev, &up->index);
+	if (err) {
+		mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+		goto error1;
+	}
+
+	pfn = uar2pfn(mdev, up->index);
+	if (map_wc) {
+		up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!up->map) {
+			err = -EAGAIN;
+			goto error2;
+		}
+	} else {
+		up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+		if (!up->map) {
+			err = -ENOMEM;
+			goto error2;
+		}
+	}
+	kref_init(&up->ref_count);
+	mlx5_core_dbg(mdev, "allocated UAR page: index %d, total bfregs %d\n",
+		      up->index, up->bfregs);
+	return up;
+
+error2:
+	if (mlx5_cmd_free_uar(mdev, up->index))
+		mlx5_core_warn(mdev, "failed to free uar index %d\n", up->index);
+error1:
+	kfree(up->fp_bitmap);
+	kfree(up->reg_bitmap);
+	kfree(up);
+	return ERR_PTR(err);
+}
+
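+/* Return a reference-counted UAR page from the regular (non-WC) list,
+ * allocating the first page on demand.
+ */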
+struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev)
+{
+	struct mlx5_uars_page *ret;
+
+	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+	if (list_empty(&mdev->priv.bfregs.reg_head.list)) {
+		ret = alloc_uars_page(mdev, false);
+		if (IS_ERR(ret)) {
+			ret = NULL;
+			goto out;
+		}
+		list_add(&ret->list, &mdev->priv.bfregs.reg_head.list);
+	} else {
+		ret = list_first_entry(&mdev->priv.bfregs.reg_head.list,
+				       struct mlx5_uars_page, list);
+		kref_get(&ret->ref_count);
+	}
+out:
+	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(mlx5_get_uars_page);
+
+void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up)
+{
+	mutex_lock(&mdev->priv.bfregs.reg_head.lock);
+	kref_put(&up->ref_count, up_rel_func);
+	mutex_unlock(&mdev->priv.bfregs.reg_head.lock);
+}
+EXPORT_SYMBOL(mlx5_put_uars_page);
+
+static unsigned long map_offset(struct mlx5_core_dev *mdev, int dbi)
+{
+	/* return the offset in bytes from the start of the page to the
+	 * blue flame area of the UAR
+	 */
+	return dbi / MLX5_BFREGS_PER_UAR * MLX5_ADAPTER_PAGE_SIZE +
+	       (dbi % MLX5_BFREGS_PER_UAR) *
+	       (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) + MLX5_BF_OFFSET;
+}
+
+static int alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		       bool map_wc, bool fast_path)
+{
+	struct mlx5_bfreg_data *bfregs;
+	struct mlx5_uars_page *up;
+	struct list_head *head;
+	unsigned long *bitmap;
+	unsigned int *avail;
+	struct mutex *lock;  /* pointer to right mutex */
+	int dbi;
+
+	bfregs = &mdev->priv.bfregs;
+	if (map_wc) {
+		head = &bfregs->wc_head.list;
+		lock = &bfregs->wc_head.lock;
+	} else {
+		head = &bfregs->reg_head.list;
+		lock = &bfregs->reg_head.lock;
+	}
+	mutex_lock(lock);
+	if (list_empty(head)) {
+		up = alloc_uars_page(mdev, map_wc);
+		if (IS_ERR(up)) {
+			mutex_unlock(lock);
+			return PTR_ERR(up);
+		}
+		list_add(&up->list, head);
+	} else {
+		up = list_entry(head->next, struct mlx5_uars_page, list);
+		kref_get(&up->ref_count);
+	}
+	if (fast_path) {
+		bitmap = up->fp_bitmap;
+		avail = &up->fp_avail;
+	} else {
+		bitmap = up->reg_bitmap;
+		avail = &up->reg_avail;
+	}
+	dbi = find_first_bit(bitmap, up->bfregs);
+	clear_bit(dbi, bitmap);
+	(*avail)--;
+	if (!(*avail))
+		list_del(&up->list);
+
+	bfreg->map = up->map + map_offset(mdev, dbi);
+	bfreg->up = up;
+	bfreg->wc = map_wc;
+	bfreg->index = up->index + dbi / MLX5_BFREGS_PER_UAR;
+	mutex_unlock(lock);
+
+	return 0;
+}
+
+int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
+		     bool map_wc, bool fast_path)
+{
+	int err;
+
+	err = alloc_bfreg(mdev, bfreg, map_wc, fast_path);
+	if (!err)
+		return 0;
+
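+	/* ioremap_wc() failure is reported as -EAGAIN; fall back to a
+	 * non-write-combined mapping.
+	 */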
+	if (err == -EAGAIN && map_wc)
+		return alloc_bfreg(mdev, bfreg, false, fast_path);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_bfreg);
+
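+/* Recover the bfreg index within the system page from its mapped
+ * address, so the index does not need to be stored per bfreg.
+ */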
+static unsigned int addr_to_dbi_in_syspage(struct mlx5_core_dev *dev,
+					   struct mlx5_uars_page *up,
+					   struct mlx5_sq_bfreg *bfreg)
+{
+	unsigned int uar_idx;
+	unsigned int bfreg_idx;
+	unsigned int bf_reg_size;
+
+	bf_reg_size = 1 << MLX5_CAP_GEN(dev, log_bf_reg_size);
+
+	uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT;
+	bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size;
+
+	return uar_idx * MLX5_BFREGS_PER_UAR + bfreg_idx;
+}
+
+void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg)
+{
+	struct mlx5_bfreg_data *bfregs;
+	struct mlx5_uars_page *up;
+	struct mutex *lock; /* pointer to right mutex */
+	unsigned int dbi;
+	bool fp;
+	unsigned int *avail;
+	unsigned long *bitmap;
+	struct list_head *head;
+
+	bfregs = &mdev->priv.bfregs;
+	if (bfreg->wc) {
+		head = &bfregs->wc_head.list;
+		lock = &bfregs->wc_head.lock;
+	} else {
+		head = &bfregs->reg_head.list;
+		lock = &bfregs->reg_head.lock;
+	}
+	up = bfreg->up;
+	dbi = addr_to_dbi_in_syspage(mdev, up, bfreg);
+	fp = (dbi % MLX5_BFREGS_PER_UAR) >= MLX5_NON_FP_BFREGS_PER_UAR;
+	if (fp) {
+		avail = &up->fp_avail;
+		bitmap = up->fp_bitmap;
+	} else {
+		avail = &up->reg_avail;
+		bitmap = up->reg_bitmap;
+	}
+	mutex_lock(lock);
+	(*avail)++;
+	set_bit(dbi, bitmap);
+	if (*avail == 1)
+		list_add_tail(&up->list, head);
+
+	kref_put(&up->ref_count, up_rel_func);
+	mutex_unlock(lock);
+}
+EXPORT_SYMBOL(mlx5_free_bfreg);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/vport.c b/drivers/net/ethernet/mellanox/mlx5/lib/vport.c
new file mode 100644
index 0000000..beb3f8d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/vport.c
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include "lib/driver.h"
+#include "lib/vport.h"
+#include "mlx5.h"
+
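+/* Query the state of a vport; other_vport must be set when the queried
+ * vport is not our own (vport number 0).
+ */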
+static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
+				   u16 vport, u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
+
+	MLX5_SET(query_vport_state_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_STATE);
+	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+	MLX5_SET(query_vport_state_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(query_vport_state_in, in, other_vport, 1);
+
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
+	return MLX5_GET(query_vport_state_out, out, state);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
+
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};
+
+	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));
+
+	return MLX5_GET(query_vport_state_out, out, admin_state);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_vport_admin_state);
+
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+				  u16 vport, u8 state)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
+
+	MLX5_SET(modify_vport_state_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
+	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
+	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(modify_vport_state_in, in, other_vport, 1);
+	MLX5_SET(modify_vport_state_in, in, admin_state, state);
+
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_vport_admin_state);
+
+static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
+					u32 *out, int outlen)
+{
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
+					 int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+
+	MLX5_SET(modify_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+}
+
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline)
+{
+	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+	int err;
+
+	err = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
+	if (!err)
+		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
+				       nic_vport_context.min_wqe_inline_mode);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 min_inline)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *nic_vport_ctx;
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.min_inline, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+				     in, nic_vport_context);
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 min_wqe_inline_mode, min_inline);
+
+	return mlx5_modify_nic_vport_context(mdev, in, inlen);
+}
+
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 *addr)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u8 *out_addr;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+				nic_vport_context.permanent_address);
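+	/* The 8-byte permanent_address field carries the MAC in its last
+	 * six bytes, hence the two-byte offset when copying it out.
+	 */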
+
+	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+	if (!err)
+		ether_addr_copy(addr, &out_addr[2]);
+
+	kvfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
+
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+				      u16 vport, u8 *addr)
+{
+	void *in;
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	int err;
+	void *nic_vport_ctx;
+	u8 *perm_mac;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_warn(mdev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.permanent_address, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+
+	if (vport)
+		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+				     in, nic_vport_context);
+	perm_mac = MLX5_ADDR_OF(nic_vport_context, nic_vport_ctx,
+				permanent_address);
+
+	ether_addr_copy(&perm_mac[2], addr);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
+
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	u32 *out;
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*mtu = MLX5_GET(query_nic_vport_context_out, out,
+				nic_vport_context.mtu);
+
+	kvfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *in;
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+				  u32 vport,
+				  enum mlx5_list_type list_type,
+				  u8 addr_list[][ETH_ALEN],
+				  int *list_size)
+{
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+	void *nic_vport_ctx;
+	int max_list_size;
+	int req_list_size;
+	int out_sz;
+	void *out;
+	int err;
+	int i;
+
+	req_list_size = *list_size;
+
+	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
+		1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
+		1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
+
+	if (req_list_size > max_list_size) {
+		mlx5_core_warn(dev, "Requested list size (%d) exceeds max list size (%d)\n",
+			       req_list_size, max_list_size);
+		req_list_size = max_list_size;
+	}
+
+	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
+	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+
+	if (vport)
+		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto out;
+
+	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+				     nic_vport_context);
+	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
+				 allowed_list_size);
+
+	*list_size = req_list_size;
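+	/* Each mac_address_layout entry starts with two reserved bytes,
+	 * hence the +2 offset.
+	 */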
+	for (i = 0; i < req_list_size; i++) {
+		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
+					nic_vport_ctx,
+					current_uc_mac_address[i]) + 2;
+		ether_addr_copy(addr_list[i], mac_addr);
+	}
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
+
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+				   enum mlx5_list_type list_type,
+				   u8 addr_list[][ETH_ALEN],
+				   int list_size)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+	void *nic_vport_ctx;
+	int max_list_size;
+	int in_sz;
+	void *in;
+	int err;
+	int i;
+
+	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
+		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
+		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);
+
+	if (list_size > max_list_size)
+		return -ENOSPC;
+
+	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.addresses_list, 1);
+
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+				     nic_vport_context);
+
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 allowed_list_type, list_type);
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 allowed_list_size, list_size);
+
+	for (i = 0; i < list_size; i++) {
+		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
+					    nic_vport_ctx,
+					    current_uc_mac_address[i]) + 2;
+		ether_addr_copy(curr_mac, addr_list[i]);
+	}
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+	kfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
+
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+			       u32 vport,
+			       u16 vlans[],
+			       int *size)
+{
+	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
+	void *nic_vport_ctx;
+	int req_list_size;
+	int max_list_size;
+	int out_sz;
+	void *out;
+	int err;
+	int i;
+
+	req_list_size = *size;
+	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
+	if (req_list_size > max_list_size) {
+		mlx5_core_warn(dev, "Requested list size (%d) exceeds max list size (%d)\n",
+			       req_list_size, max_list_size);
+		req_list_size = max_list_size;
+	}
+
+	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
+			req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);
+
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
+		 MLX5_NVPRT_LIST_TYPE_VLAN);
+	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+
+	if (vport)
+		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto out;
+
+	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+				     nic_vport_context);
+	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
+				 allowed_list_size);
+
+	*size = req_list_size;
+	for (i = 0; i < req_list_size; i++) {
+		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+					       nic_vport_ctx,
+					       current_uc_mac_address[i]);
+		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
+	}
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
+
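+/* Replace the VLAN allowed-list of the NIC vport context. Entries reuse
+ * the current_uc_mac_address[] slots, reinterpreted via vlan_layout.
+ */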
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+				u16 vlans[],
+				int list_size)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)];
+	void *nic_vport_ctx;
+	int max_list_size;
+	int in_sz;
+	void *in;
+	int err;
+	int i;
+
+	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
+
+	if (list_size > max_list_size)
+		return -ENOSPC;
+
+	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
+		list_size * MLX5_ST_SZ_BYTES(vlan_layout);
+
+	memset(out, 0, sizeof(out));
+	in = kzalloc(in_sz, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.addresses_list, 1);
+
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
+				     nic_vport_context);
+
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
+	MLX5_SET(nic_vport_context, nic_vport_ctx,
+		 allowed_list_size, list_size);
+
+	for (i = 0; i < list_size; i++) {
+		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
+					       nic_vport_ctx,
+					       current_uc_mac_address[i]);
+		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
+	}
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+	kfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
+
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+					   u64 *system_image_guid)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*system_image_guid =
+			MLX5_GET64(query_nic_vport_context_out, out,
+				   nic_vport_context.system_image_guid);
+
+	kvfree(out);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
+
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
+					nic_vport_context.node_guid);
+
+	kvfree(out);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+
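+/* Set the node GUID of another (non-zero) vport. Requires the vport group
+ * manager capability and firmware support for nic_vport_node_guid_modify.
+ */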
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				    u32 vport, u64 node_guid)
+{
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	void *nic_vport_context;
+	void *in;
+	int err;
+
+	if (!vport)
+		return -EINVAL;
+	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+		return -EACCES;
+	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+		return -ENOTSUPP;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 field_select.node_guid, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+	MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+	nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+					 in, nic_vport_context);
+	MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+					u16 *qkey_viol_cntr)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	int err;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+	if (!err)
+		*qkey_viol_cntr =
+			MLX5_GET(query_nic_vport_context_out, out,
+				 nic_vport_context.qkey_violation_counter);
+
+	kvfree(out);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
+
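+/* Query a GID table entry of an HCA vport. A gid_index of 0xffff makes
+ * firmware return the whole table, but only the first entry is copied
+ * back to the caller.
+ */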
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+			     u8 port_num, u16  vf_num, u16 gid_index,
+			     union ib_gid *gid)
+{
+	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+	int is_group_manager;
+	void *out = NULL;
+	void *in = NULL;
+	union ib_gid *tmp;
+	int tbsz;
+	int nout;
+	int err;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
+	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
+		      vf_num, gid_index, tbsz);
+
+	if (gid_index > tbsz && gid_index != 0xffff)
+		return -EINVAL;
+
+	if (gid_index == 0xffff)
+		nout = tbsz;
+	else
+		nout = 1;
+
+	out_sz += nout * sizeof(*gid);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!in || !out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
+			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
+		} else {
+			err = -EPERM;
+			goto out;
+		}
+	}
+	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+	if (err)
+		goto out;
+
+	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
+	gid->global.subnet_prefix = tmp->global.subnet_prefix;
+	gid->global.interface_id = tmp->global.interface_id;
+
+out:
+	kfree(in);
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
+
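+/* Query pkey table entries of an HCA vport. A pkey_index of 0xffff
+ * fetches the whole table into the caller's array; otherwise a single
+ * entry is returned.
+ */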
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+			      u8 port_num, u16 vf_num, u16 pkey_index,
+			      u16 *pkey)
+{
+	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
+	int is_group_manager;
+	void *out = NULL;
+	void *in = NULL;
+	void *pkarr;
+	int nout;
+	int tbsz;
+	int err;
+	int i;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
+	if (pkey_index > tbsz && pkey_index != 0xffff)
+		return -EINVAL;
+
+	if (pkey_index == 0xffff)
+		nout = tbsz;
+	else
+		nout = 1;
+
+	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);
+
+	in = kzalloc(in_sz, GFP_KERNEL);
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!in || !out) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
+			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
+		} else {
+			err = -EPERM;
+			goto out;
+		}
+	}
+	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+	if (err)
+		goto out;
+
+	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
+	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
+		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);
+
+out:
+	kfree(in);
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
+
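+/* Fetch the HCA vport context from firmware and unpack it field by field
+ * into the caller's struct mlx5_hca_vport_context.
+ */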
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+				 u8 other_vport, u8 port_num,
+				 u16 vf_num,
+				 struct mlx5_hca_vport_context *rep)
+{
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
+	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};
+	int is_group_manager;
+	void *out;
+	void *ctx;
+	int err;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);
+
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
+			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
+		} else {
+			err = -EPERM;
+			goto ex;
+		}
+	}
+
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto ex;
+
+	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
+	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
+	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
+	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
+	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
+	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
+	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
+				      port_physical_state);
+	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
+	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
+					       port_physical_state);
+	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
+	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
+	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
+	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
+					  cap_mask1_field_select);
+	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
+	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
+					  cap_mask2_field_select);
+	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
+	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
+					   init_type_reply);
+	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
+	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
+					  subnet_timeout);
+	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
+	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
+	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+						  qkey_violation_counter);
+	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
+						  pkey_violation_counter);
+	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
+	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
+					    system_image_guid);
+
+ex:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
+
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+					   u64 *sys_image_guid)
+{
+	struct mlx5_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*sys_image_guid = rep->sys_image_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
+
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+				   u64 *node_guid)
+{
+	struct mlx5_hca_vport_context *rep;
+	int err;
+
+	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, rep);
+	if (!err)
+		*node_guid = rep->node_guid;
+
+	kfree(rep);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
+
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+				 u32 vport,
+				 int *promisc_uc,
+				 int *promisc_mc,
+				 int *promisc_all)
+{
+	u32 *out;
+	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+	int err;
+
+	out = kzalloc(outlen, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	err = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
+	if (err)
+		goto out;
+
+	*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
+			       nic_vport_context.promisc_uc);
+	*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
+			       nic_vport_context.promisc_mc);
+	*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
+				nic_vport_context.promisc_all);
+
+out:
+	kfree(out);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
+
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+				  int promisc_uc,
+				  int promisc_mc,
+				  int promisc_all)
+{
+	void *in;
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_err(mdev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 nic_vport_context.promisc_uc, promisc_uc);
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 nic_vport_context.promisc_mc, promisc_mc);
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 nic_vport_context.promisc_all, promisc_all);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
+
+enum mlx5_vport_roce_state {
+	MLX5_VPORT_ROCE_DISABLED = 0,
+	MLX5_VPORT_ROCE_ENABLED  = 1,
+};
+
+static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
+					    enum mlx5_vport_roce_state state)
+{
+	void *in;
+	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+	int err;
+
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		mlx5_core_warn(mdev, "failed to allocate inbox\n");
+		return -ENOMEM;
+	}
+
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
+		 state);
+
+	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
+{
+	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
+
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
+{
+	return mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
+}
+EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
+
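+/* Dump the raw per-vport counter set into the caller-supplied buffer.
+ * For other-vport queries the vport number is vf + 1, skipping the PF
+ * at vport 0.
+ */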
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+				  int vf, u8 port_num, void *out,
+				  size_t out_sz)
+{
+	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
+	int	is_group_manager;
+	void   *in;
+	int	err;
+
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+	in = mlx5_vzalloc(in_sz);
+	if (!in) {
+		err = -ENOMEM;
+		return err;
+	}
+
+	MLX5_SET(query_vport_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+			MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
+		} else {
+			err = -EPERM;
+			goto free;
+		}
+	}
+	if (MLX5_CAP_GEN(dev, num_ports) == 2)
+		MLX5_SET(query_vport_counter_in, in, port_num, port_num);
+
+	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
+free:
+	kvfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
+
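+/* Write back an HCA vport context; the field_select bitmask in the
+ * context marks which of the packed fields firmware should apply.
+ */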
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+				       u8 other_vport, u8 port_num,
+				       int vf,
+				       struct mlx5_hca_vport_context *req)
+{
+	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
+	u8 out[MLX5_ST_SZ_BYTES(modify_hca_vport_context_out)];
+	int is_group_manager;
+	void *in;
+	int err;
+	void *ctx;
+
+	mlx5_core_dbg(dev, "vf %d\n", vf);
+	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
+	in = kzalloc(in_sz, GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	memset(out, 0, sizeof(out));
+	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
+	if (other_vport) {
+		if (is_group_manager) {
+			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
+			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
+		} else {
+			err = -EPERM;
+			goto ex;
+		}
+	}
+
+	if (MLX5_CAP_GEN(dev, num_ports) > 1)
+		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);
+
+	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
+	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
+	MLX5_SET(hca_vport_context, ctx, sm_virt_aware, req->sm_virt_aware);
+	MLX5_SET(hca_vport_context, ctx, has_smi, req->has_smi);
+	MLX5_SET(hca_vport_context, ctx, has_raw, req->has_raw);
+	MLX5_SET(hca_vport_context, ctx, vport_state_policy, req->policy);
+	MLX5_SET(hca_vport_context, ctx, port_physical_state, req->phys_state);
+	MLX5_SET(hca_vport_context, ctx, vport_state, req->vport_state);
+	MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
+	MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
+	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
+	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select, req->cap_mask1_perm);
+	MLX5_SET(hca_vport_context, ctx, cap_mask2, req->cap_mask2);
+	MLX5_SET(hca_vport_context, ctx, cap_mask2_field_select, req->cap_mask2_perm);
+	MLX5_SET(hca_vport_context, ctx, lid, req->lid);
+	MLX5_SET(hca_vport_context, ctx, init_type_reply, req->init_type_reply);
+	MLX5_SET(hca_vport_context, ctx, lmc, req->lmc);
+	MLX5_SET(hca_vport_context, ctx, subnet_timeout, req->subnet_timeout);
+	MLX5_SET(hca_vport_context, ctx, sm_lid, req->sm_lid);
+	MLX5_SET(hca_vport_context, ctx, sm_sl, req->sm_sl);
+	MLX5_SET(hca_vport_context, ctx, qkey_violation_counter, req->qkey_violation_counter);
+	MLX5_SET(hca_vport_context, ctx, pkey_violation_counter, req->pkey_violation_counter);
+	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+ex:
+	kfree(in);
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
diff --git a/drivers/net/ethernet/mellanox/mlx5/lib/vport.h b/drivers/net/ethernet/mellanox/mlx5/lib/vport.h
new file mode 100644
index 0000000..c99cd27
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/lib/vport.h
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include "lib/driver.h"
+#include "lib/device.h"
+
+enum {
+	MLX5_CAP_INLINE_MODE_L2,
+	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
+	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
+};
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
+u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+				u16 vport);
+int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
+				  u16 vport, u8 state);
+int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 *addr);
+int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				    u16 vport, u8 *min_inline);
+int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+				     u16 vport, u8 min_inline);
+int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
+				      u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+					   u64 *system_image_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+				    u32 vport, u64 node_guid);
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+					u16 *qkey_viol_cntr);
+int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
+			     u8 port_num, u16  vf_num, u16 gid_index,
+			     union ib_gid *gid);
+int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
+			      u8 port_num, u16 vf_num, u16 pkey_index,
+			      u16 *pkey);
+int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
+				 u8 other_vport, u8 port_num,
+				 u16 vf_num,
+				 struct mlx5_hca_vport_context *rep);
+int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
+					   u64 *sys_image_guid);
+int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
+				   u64 *node_guid);
+int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
+				  u32 vport,
+				  enum mlx5_list_type list_type,
+				  u8 addr_list[][ETH_ALEN],
+				  int *list_size);
+int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
+				   enum mlx5_list_type list_type,
+				   u8 addr_list[][ETH_ALEN],
+				   int list_size);
+int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
+				 u32 vport,
+				 int *promisc_uc,
+				 int *promisc_mc,
+				 int *promisc_all);
+int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
+				  int promisc_uc,
+				  int promisc_mc,
+				  int promisc_all);
+int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
+			       u32 vport,
+			       u16 vlans[],
+			       int *size);
+int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
+				u16 vlans[],
+				int list_size);
+
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
+				  int vf, u8 port_num, void *out,
+				  size_t out_sz);
+int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
+				       u8 other_vport, u8 port_num,
+				       int vf,
+				       struct mlx5_hca_vport_context *req);
+
+#endif /* __MLX5_VPORT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/main.c b/drivers/net/ethernet/mellanox/mlx5/main.c
new file mode 100644
index 0000000..dd7e390
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/main.c
@@ -0,0 +1,1575 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include "lib/driver.h"
+#include "lib/cq.h"
+#include "lib/qp.h"
+#include "lib/srq.h"
+#include <linux/debugfs.h>
+#include <linux/kmod.h>
+#include "lib/mlx5_ifc.h"
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include <net/devlink.h>
+#include "mlx5.h"
+#include "fs/core.h"
+#ifdef CONFIG_MLX5_CORE_EN
+#include "sriov/eswitch.h"
+#endif
+
+MODULE_AUTHOR("Eli Cohen <eli@...lanox.com>");
+MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+unsigned int mlx5_core_debug_mask;
+module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
+
+#define MLX5_DEFAULT_PROF	2
+static unsigned int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, uint, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+
+enum {
+	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
+	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
+};
+
+static struct mlx5_profile profile[] = {
+	[0] = {
+		.mask           = 0,
+	},
+	[1] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE,
+		.log_max_qp	= 12,
+	},
+	[2] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE |
+				  MLX5_PROF_MASK_MR_CACHE,
+		.log_max_qp	= 17,
+		.mr_cache[0]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[1]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[2]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[3]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[4]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[5]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[6]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[7]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[8]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[9]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[10]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[11]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[12]	= {
+			.size	= 64,
+			.limit	= 32
+		},
+		.mr_cache[13]	= {
+			.size	= 32,
+			.limit	= 16
+		},
+		.mr_cache[14]	= {
+			.size	= 16,
+			.limit	= 8
+		},
+		.mr_cache[15]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+		.mr_cache[16]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+		.mr_cache[17]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+		.mr_cache[18]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+		.mr_cache[19]	= {
+			.size	= 4,
+			.limit	= 2
+		},
+		.mr_cache[20]	= {
+			.size	= 4,
+			.limit	= 2
+		},
+	},
+};
+
+#define FW_INIT_TIMEOUT_MILI	2000
+#define FW_INIT_WAIT_MS		2
+
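+/* Poll the initializing bit in the init segment until firmware reports
+ * ready, giving up with -EBUSY after max_wait_mili milliseconds.
+ */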
+static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
+	int err = 0;
+
+	while (fw_initializing(dev)) {
+		if (time_after(jiffies, end)) {
+			err = -EBUSY;
+			break;
+		}
+		msleep(FW_INIT_WAIT_MS);
+	}
+
+	return err;
+}
+
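+/* Report "Linux,<driver>,<version>" to firmware via SET_DRIVER_VERSION,
+ * provided the device advertises the driver_version capability.
+ */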
+static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
+{
+	int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
+					      driver_version);
+	u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0};
+	u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0};
+	int remaining_size = driver_ver_sz;
+	char *string;
+
+	if (!MLX5_CAP_GEN(dev, driver_version))
+		return;
+
+	string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);
+
+	strncpy(string, "Linux", remaining_size);
+
+	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+	strncat(string, ",", remaining_size);
+
+	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+	strncat(string, DRIVER_NAME, remaining_size);
+
+	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+	strncat(string, ",", remaining_size);
+
+	remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
+	strncat(string, DRIVER_VERSION, remaining_size);
+
+	/* Send the command */
+	MLX5_SET(set_driver_version_in, in, opcode,
+		 MLX5_CMD_OP_SET_DRIVER_VERSION);
+
+	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int set_dma_caps(struct pci_dev *pdev)
+{
+	int err;
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
+			return err;
+		}
+	}
+
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (err) {
+		dev_warn(&pdev->dev,
+			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"Can't set consistent PCI DMA mask, aborting\n");
+			return err;
+		}
+	}
+
+	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
+	return err;
+}
+
+static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
+		err = pci_enable_device(pdev);
+		if (!err)
+			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+
+	return err;
+}
+
+static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
+{
+	struct pci_dev *pdev = dev->pdev;
+
+	mutex_lock(&dev->pci_status_mutex);
+	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
+		pci_disable_device(pdev);
+		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
+	}
+	mutex_unlock(&dev->pci_status_mutex);
+}
+
+static int request_bar(struct pci_dev *pdev)
+{
+	int err = 0;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
+		return -ENODEV;
+	}
+
+	err = pci_request_regions(pdev, DRIVER_NAME);
+	if (err)
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
+
+	return err;
+}
+
+static void release_bar(struct pci_dev *pdev)
+{
+	pci_release_regions(pdev);
+}
+
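+/* Request one completion vector per online CPU per port, plus the
+ * MLX5_EQ_VEC_COMP_BASE control vectors, bounded by log_max_eq, and
+ * enable MSI-X for them.
+ */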
+static int mlx5_enable_msix(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_eq_table *table = &priv->eq_table;
+	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
+	int nvec;
+	int i;
+
+	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+	       MLX5_EQ_VEC_COMP_BASE;
+	nvec = min_t(int, nvec, num_eqs);
+	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
+		return -ENOMEM;
+
+	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+	if (!priv->msix_arr || !priv->irq_info)
+		goto err_free_msix;
+
+	for (i = 0; i < nvec; i++)
+		priv->msix_arr[i].entry = i;
+
+	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
+				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
+	if (nvec < 0) {
+		kfree(priv->irq_info);
+		kfree(priv->msix_arr);
+		return nvec;
+	}
+
+	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
+
+	return 0;
+
+err_free_msix:
+	kfree(priv->irq_info);
+	kfree(priv->msix_arr);
+	return -ENOMEM;
+}
+
+static void mlx5_disable_msix(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+
+	pci_disable_msix(dev->pdev);
+	kfree(priv->irq_info);
+	kfree(priv->msix_arr);
+}
+
+struct mlx5_reg_host_endianness {
+	u8	he;
+	u8      rsvd[15];
+};
+
+#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
+
+enum {
+	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
+				MLX5_DEV_CAP_FLAG_DCT,
+};
+
+static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
+{
+	switch (size) {
+	case 128:
+		return 0;
+	case 256:
+		return 1;
+	case 512:
+		return 2;
+	case 1024:
+		return 3;
+	case 2048:
+		return 4;
+	case 4096:
+		return 5;
+	default:
+		mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
+		return 0;
+	}
+}
+
+static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
+				   enum mlx5_cap_type cap_type,
+				   enum mlx5_cap_mode cap_mode)
+{
+	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	void *out, *hca_caps;
+	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
+	int err;
+
+	memset(in, 0, sizeof(in));
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err) {
+		mlx5_core_warn(dev,
+			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+			       cap_type, cap_mode, err);
+		goto query_ex;
+	}
+
+	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+	switch (cap_mode) {
+	case HCA_CAP_OPMOD_GET_MAX:
+		memcpy(dev->hca_caps_max[cap_type], hca_caps,
+		       MLX5_UN_SZ_BYTES(hca_cap_union));
+		break;
+	case HCA_CAP_OPMOD_GET_CUR:
+		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+		       MLX5_UN_SZ_BYTES(hca_cap_union));
+		break;
+	default:
+		mlx5_core_warn(dev,
+			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+			       cap_type, cap_mode);
+		err = -EINVAL;
+		break;
+	}
+query_ex:
+	kfree(out);
+	return err;
+}
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
+{
+	int ret;
+
+	ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
+	if (ret)
+		return ret;
+	return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
+}
+
+static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
+{
+	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)] = {0};
+
+	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
+	return mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
+}
+
+static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
+{
+	void *set_ctx;
+	void *set_hca_cap;
+	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+	int req_endianness;
+	int err;
+
+	if (MLX5_CAP_GEN(dev, atomic)) {
+		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
+		if (err)
+			return err;
+	} else {
+		return 0;
+	}
+
+	req_endianness =
+		MLX5_CAP_ATOMIC(dev,
+				supported_atomic_req_8B_endianess_mode_1);
+
+	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
+		return 0;
+
+	set_ctx = kzalloc(set_sz, GFP_KERNEL);
+	if (!set_ctx)
+		return -ENOMEM;
+
+	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
+
+	/* Set requestor to host endianness */
+	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
+		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);
+
+	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
+
+	kfree(set_ctx);
+	return err;
+}
+
+static int handle_hca_cap(struct mlx5_core_dev *dev)
+{
+	void *set_ctx = NULL;
+	struct mlx5_profile *prof = dev->profile;
+	int err = -ENOMEM;
+	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+	void *set_hca_cap;
+
+	set_ctx = kzalloc(set_sz, GFP_KERNEL);
+	if (!set_ctx)
+		goto query_ex;
+
+	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
+	if (err)
+		goto query_ex;
+
+	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+				   capability);
+	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+	       MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+		      128);
+	/* we limit the size of the pkey table to 128 entries for now */
+	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+		 to_fw_pkey_sz(dev, 128));
+
+	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
+		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+			 prof->log_max_qp);
+
+	/* disable cmdif checksum */
+	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
+
+	/* If the HCA supports 4K UARs use it */
+	if (MLX5_CAP_GEN_MAX(dev, uar_4k))
+		MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
+
+	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
+
+	err = set_caps(dev, set_ctx, set_sz,
+		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
+
+query_ex:
+	kfree(set_ctx);
+	return err;
+}
+
+static int set_hca_ctrl(struct mlx5_core_dev *dev)
+{
+	struct mlx5_reg_host_endianness he_in;
+	struct mlx5_reg_host_endianness he_out;
+	int err;
+
+	if (!mlx5_core_is_pf(dev))
+		return 0;
+
+	memset(&he_in, 0, sizeof(he_in));
+	he_in.he = MLX5_SET_HOST_ENDIANNESS;
+	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
+					&he_out, sizeof(he_out),
+					MLX5_REG_HOST_ENDIANNESS, 0, 1);
+	return err;
+}
+
+int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(enable_hca_in)]   = {0};
+
+	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
+	MLX5_SET(enable_hca_in, in, function_id, func_id);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
+{
+	u32 out[MLX5_ST_SZ_DW(disable_hca_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(disable_hca_in)]   = {0};
+
+	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
+	MLX5_SET(disable_hca_in, in, function_id, func_id);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
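+/* Read the 64-bit internal timer from two 32-bit registers; the high word
+ * is read twice to detect a low-word wrap between the reads.
+ */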
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
+{
+	u32 timer_h, timer_h1, timer_l;
+
+	timer_h = ioread32be(&dev->iseg->internal_timer_h);
+	timer_l = ioread32be(&dev->iseg->internal_timer_l);
+	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
+	if (timer_h != timer_h1) /* wrap around */
+		timer_l = ioread32be(&dev->iseg->internal_timer_l);
+
+	return (u64)timer_l | (u64)timer_h1 << 32;
+}
+
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	struct msix_entry *msix = priv->msix_arr;
+	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+	int numa_node           = priv->numa_node;
+	int err;
+
+	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+		mlx5_core_warn(mdev, "zalloc_cpumask_var failed\n");
+		return -ENOMEM;
+	}
+
+	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+			priv->irq_info[i].mask);
+
+	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+	if (err) {
+		mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x\n",
+			       irq);
+		goto err_clear_mask;
+	}
+
+	return 0;
+
+err_clear_mask:
+	free_cpumask_var(priv->irq_info[i].mask);
+	return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+	struct mlx5_priv *priv  = &mdev->priv;
+	struct msix_entry *msix = priv->msix_arr;
+	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+	irq_set_affinity_hint(irq, NULL);
+	free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+		err = mlx5_irq_set_affinity_hint(mdev, i);
+		if (err)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	for (i--; i >= 0; i--)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+
+	return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+	int i;
+
+	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+		mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
+int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
+		    unsigned int *irqn)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq, *n;
+	int err = -ENOENT;
+
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		if (eq->index == vector) {
+			*eqn = eq->eqn;
+			*irqn = eq->irqn;
+			err = 0;
+			break;
+		}
+	}
+	spin_unlock(&table->lock);
+
+	return err;
+}
+EXPORT_SYMBOL(mlx5_vector2eqn);
+
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq;
+
+	spin_lock(&table->lock);
+	list_for_each_entry(eq, &table->comp_eqs_list, list)
+		if (eq->eqn == eqn) {
+			spin_unlock(&table->lock);
+			return eq;
+		}
+
+	spin_unlock(&table->lock);
+
+	return ERR_PTR(-ENOENT);
+}
+
+static void free_comp_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	struct mlx5_eq *eq, *n;
+
+#ifdef CONFIG_RFS_ACCEL
+	if (dev->rmap) {
+		free_irq_cpu_rmap(dev->rmap);
+		dev->rmap = NULL;
+	}
+#endif
+	spin_lock(&table->lock);
+	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+		list_del(&eq->list);
+		spin_unlock(&table->lock);
+		if (mlx5_destroy_unmap_eq(dev, eq))
+			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
+				       eq->eqn);
+		kfree(eq);
+		spin_lock(&table->lock);
+	}
+	spin_unlock(&table->lock);
+}
+
+static int alloc_comp_eqs(struct mlx5_core_dev *dev)
+{
+	struct mlx5_eq_table *table = &dev->priv.eq_table;
+	char name[MLX5_MAX_IRQ_NAME];
+	struct mlx5_eq *eq;
+	int ncomp_vec;
+	int nent;
+	int err;
+	int i;
+
+	INIT_LIST_HEAD(&table->comp_eqs_list);
+	ncomp_vec = table->num_comp_vectors;
+	nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+	dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+	if (!dev->rmap)
+		return -ENOMEM;
+#endif
+	for (i = 0; i < ncomp_vec; i++) {
+		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
+		if (!eq) {
+			err = -ENOMEM;
+			goto clean;
+		}
+
+#ifdef CONFIG_RFS_ACCEL
+		irq_cpu_rmap_add(dev->rmap,
+				 dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
+		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
+		err = mlx5_create_map_eq(dev, eq,
+					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
+					 name, MLX5_EQ_TYPE_COMP);
+		if (err) {
+			kfree(eq);
+			goto clean;
+		}
+		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
+		eq->index = i;
+		spin_lock(&table->lock);
+		list_add_tail(&eq->list, &table->comp_eqs_list);
+		spin_unlock(&table->lock);
+	}
+
+	return 0;
+
+clean:
+	free_comp_eqs(dev);
+	return err;
+}
+
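+/* Negotiate the command interface version: prefer ISSI 1 when the device
+ * supports it, fall back to ISSI 0, and fail for anything else.
+ */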
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)]   = {0};
+	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {0};
+	u32 sup_issi;
+	int err;
+
+	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+	err = mlx5_cmd_exec(dev, query_in, sizeof(query_in),
+			    query_out, sizeof(query_out));
+	if (err) {
+		u32 syndrome;
+		u8 status;
+
+		mlx5_cmd_mbox_status(query_out, &status, &syndrome);
+		if (!status || syndrome == MLX5_DRIVER_SYND) {
+			mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
+				      err, status, syndrome);
+			return err;
+		}
+
+		mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
+		dev->issi = 0;
+		return 0;
+	}
+
+	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
+	if (sup_issi & (1 << 1)) {
+		u32 set_in[MLX5_ST_SZ_DW(set_issi_in)]   = {0};
+		u32 set_out[MLX5_ST_SZ_DW(set_issi_out)] = {0};
+
+		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+		MLX5_SET(set_issi_in, set_in, current_issi, 1);
+		err = mlx5_cmd_exec(dev, set_in, sizeof(set_in),
+				    set_out, sizeof(set_out));
+		if (err) {
+			mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
+				      err);
+			return err;
+		}
+
+		dev->issi = 1;
+
+		return 0;
+	} else if (sup_issi & (1 << 0) || !sup_issi) {
+		return 0;
+	}
+
+	return -ENOTSUPP;
+}
+
+static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err = 0;
+
+	pci_set_drvdata(dev->pdev, dev);
+	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
+	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;
+
+	mutex_init(&priv->pgdir_mutex);
+	INIT_LIST_HEAD(&priv->pgdir_list);
+	spin_lock_init(&priv->mkey_lock);
+
+	mutex_init(&priv->alloc_mutex);
+
+	priv->numa_node = dev_to_node(&dev->pdev->dev);
+
+	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
+	if (!priv->dbg_root)
+		return -ENOMEM;
+
+	err = mlx5_pci_enable_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto err_dbg;
+	}
+
+	err = request_bar(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
+		goto err_disable;
+	}
+
+	pci_set_master(pdev);
+
+	err = set_dma_caps(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
+		goto err_clr_master;
+	}
+
+	dev->iseg_base = pci_resource_start(dev->pdev, 0);
+	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
+	if (!dev->iseg) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
+		goto err_clr_master;
+	}
+
+	return 0;
+
+err_clr_master:
+	pci_clear_master(dev->pdev);
+	release_bar(dev->pdev);
+err_disable:
+	mlx5_pci_disable_device(dev);
+
+err_dbg:
+	debugfs_remove(priv->dbg_root);
+	return err;
+}
+
+static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	iounmap(dev->iseg);
+	pci_clear_master(dev->pdev);
+	release_bar(dev->pdev);
+	mlx5_pci_disable_device(dev);
+	debugfs_remove(priv->dbg_root);
+}
+
+static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err;
+
+	err = mlx5_query_board_id(dev);
+	if (err) {
+		dev_err(&pdev->dev, "query board id failed\n");
+		goto out;
+	}
+
+	err = mlx5_eq_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize eq\n");
+		goto out;
+	}
+
+	err = mlx5_init_cq_table(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize cq table\n");
+		goto err_eq_cleanup;
+	}
+
+	mlx5_init_qp_table(dev);
+
+	mlx5_init_srq_table(dev);
+
+	mlx5_init_mkey_table(dev);
+
+	err = mlx5_init_rl_table(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init rate limiting\n");
+		goto err_tables_cleanup;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	err = mlx5_eswitch_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init eswitch %d\n", err);
+		goto err_rl_cleanup;
+	}
+#endif
+
+	err = mlx5_sriov_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init sriov %d\n", err);
+		goto err_eswitch_cleanup;
+	}
+
+	return 0;
+
+err_eswitch_cleanup:
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_cleanup(dev->priv.eswitch);
+
+err_rl_cleanup:
+#endif
+	mlx5_cleanup_rl_table(dev);
+
+err_tables_cleanup:
+	mlx5_cleanup_mkey_table(dev);
+	mlx5_cleanup_srq_table(dev);
+	mlx5_cleanup_qp_table(dev);
+	mlx5_cleanup_cq_table(dev);
+
+err_eq_cleanup:
+	mlx5_eq_cleanup(dev);
+
+out:
+	return err;
+}
+
+static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
+{
+	mlx5_sriov_cleanup(dev);
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_cleanup(dev->priv.eswitch);
+#endif
+	mlx5_cleanup_rl_table(dev);
+	mlx5_cleanup_mkey_table(dev);
+	mlx5_cleanup_srq_table(dev);
+	mlx5_cleanup_qp_table(dev);
+	mlx5_cleanup_cq_table(dev);
+	mlx5_eq_cleanup(dev);
+}
+
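+/* Bring the device up: command interface, firmware readiness, boot/init
+ * pages, capabilities, EQs, flow steering, eswitch/SRIOV and interface
+ * registration. With boot=true the one-time SW objects are set up too.
+ */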
+static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+			 bool boot)
+{
+	struct pci_dev *pdev = dev->pdev;
+	int err;
+
+	mutex_lock(&dev->intf_state_mutex);
+	if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
+		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
+			 __func__);
+		goto out;
+	}
+
+	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+		 fw_rev_min(dev), fw_rev_sub(dev));
+
+	/* On load, clear any previous indication of internal error; the
+	 * device is coming up.
+	 */
+	dev->state = MLX5_DEVICE_STATE_UP;
+
+	err = mlx5_cmd_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
+		goto out_err;
+	}
+
+	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
+	if (err) {
+		dev_err(&dev->pdev->dev, "Firmware over %d ms in initializing state, aborting\n",
+			FW_INIT_TIMEOUT_MILI);
+		goto out_err;
+	}
+
+	err = mlx5_core_enable_hca(dev, 0);
+	if (err) {
+		dev_err(&pdev->dev, "enable hca failed\n");
+		goto err_cmd_cleanup;
+	}
+
+	err = mlx5_core_set_issi(dev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to set issi\n");
+		goto err_disable_hca;
+	}
+
+	err = mlx5_satisfy_startup_pages(dev, 1);
+	if (err) {
+		dev_err(&pdev->dev, "failed to allocate boot pages\n");
+		goto err_disable_hca;
+	}
+
+	err = set_hca_ctrl(dev);
+	if (err) {
+		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
+		goto reclaim_boot_pages;
+	}
+
+	err = handle_hca_cap(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap failed\n");
+		goto reclaim_boot_pages;
+	}
+
+	err = handle_hca_cap_atomic(dev);
+	if (err) {
+		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
+		goto reclaim_boot_pages;
+	}
+
+	err = mlx5_satisfy_startup_pages(dev, 0);
+	if (err) {
+		dev_err(&pdev->dev, "failed to allocate init pages\n");
+		goto reclaim_boot_pages;
+	}
+
+	err = mlx5_pagealloc_start(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
+		goto reclaim_boot_pages;
+	}
+
+	err = mlx5_cmd_init_hca(dev);
+	if (err) {
+		dev_err(&pdev->dev, "init hca failed\n");
+		goto err_pagealloc_stop;
+	}
+
+	mlx5_set_driver_version(dev);
+
+	mlx5_start_health_poll(dev);
+
+	err = mlx5_query_hca_caps(dev);
+	if (err) {
+		dev_err(&pdev->dev, "query hca failed\n");
+		goto err_stop_poll;
+	}
+
+	if (boot) {
+		err = mlx5_init_once(dev, priv);
+		if (err) {
+			dev_err(&pdev->dev, "sw objs init failed\n");
+			goto err_stop_poll;
+		}
+	}
+
+	err = mlx5_enable_msix(dev);
+	if (err) {
+		dev_err(&pdev->dev, "enable msix failed\n");
+		goto err_cleanup_once;
+	}
+
+	dev->priv.uar = mlx5_get_uars_page(dev);
+	if (!dev->priv.uar) {
+		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
+		err = -ENOMEM;
+		goto err_disable_msix;
+	}
+
+	err = mlx5_start_eqs(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
+		goto err_put_uars;
+	}
+
+	err = alloc_comp_eqs(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
+		goto err_stop_eqs;
+	}
+
+	err = mlx5_irq_set_affinity_hints(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+		goto err_affinity_hints;
+	}
+
+	err = mlx5_init_fs(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to init flow steering\n");
+		goto err_fs;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_attach(dev->priv.eswitch);
+#endif
+
+	err = mlx5_sriov_attach(dev);
+	if (err) {
+		dev_err(&pdev->dev, "sriov init failed %d\n", err);
+		goto err_sriov;
+	}
+
+	if (mlx5_device_registered(dev)) {
+		mlx5_attach_device(dev);
+	} else {
+		err = mlx5_register_device(dev);
+		if (err) {
+			dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+			goto err_reg_dev;
+		}
+	}
+
+	clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+out:
+	mutex_unlock(&dev->intf_state_mutex);
+
+	return 0;
+
+err_reg_dev:
+	mlx5_sriov_detach(dev);
+
+err_sriov:
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_detach(dev->priv.eswitch);
+#endif
+	mlx5_cleanup_fs(dev);
+
+err_fs:
+	mlx5_irq_clear_affinity_hints(dev);
+
+err_affinity_hints:
+	free_comp_eqs(dev);
+
+err_stop_eqs:
+	mlx5_stop_eqs(dev);
+
+err_put_uars:
+	mlx5_put_uars_page(dev, priv->uar);
+
+err_disable_msix:
+	mlx5_disable_msix(dev);
+
+err_cleanup_once:
+	if (boot)
+		mlx5_cleanup_once(dev);
+
+err_stop_poll:
+	mlx5_stop_health_poll(dev);
+	if (mlx5_cmd_teardown_hca(dev)) {
+		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+		goto out_err;
+	}
+
+err_pagealloc_stop:
+	mlx5_pagealloc_stop(dev);
+
+reclaim_boot_pages:
+	mlx5_reclaim_startup_pages(dev);
+
+err_disable_hca:
+	mlx5_core_disable_hca(dev, 0);
+
+err_cmd_cleanup:
+	mlx5_cmd_cleanup(dev);
+
+out_err:
+	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+	mutex_unlock(&dev->intf_state_mutex);
+
+	return err;
+}
+
+static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
+			   bool cleanup)
+{
+	int err = 0;
+
+	mutex_lock(&dev->intf_state_mutex);
+	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
+		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
+			 __func__);
+		if (cleanup)
+			mlx5_cleanup_once(dev);
+		goto out;
+	}
+
+	if (mlx5_device_registered(dev))
+		mlx5_detach_device(dev);
+
+	mlx5_sriov_detach(dev);
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_detach(dev->priv.eswitch);
+#endif
+	mlx5_cleanup_fs(dev);
+	mlx5_irq_clear_affinity_hints(dev);
+	free_comp_eqs(dev);
+	mlx5_stop_eqs(dev);
+	mlx5_put_uars_page(dev, priv->uar);
+	mlx5_disable_msix(dev);
+	if (cleanup)
+		mlx5_cleanup_once(dev);
+	mlx5_stop_health_poll(dev);
+	err = mlx5_cmd_teardown_hca(dev);
+	if (err) {
+		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
+		goto out;
+	}
+	mlx5_pagealloc_stop(dev);
+	mlx5_reclaim_startup_pages(dev);
+	mlx5_core_disable_hca(dev, 0);
+	mlx5_cmd_cleanup(dev);
+
+out:
+	clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+	set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+	mutex_unlock(&dev->intf_state_mutex);
+	return err;
+}
+
+struct mlx5_core_event_handler {
+	void (*event)(struct mlx5_core_dev *dev,
+		      enum mlx5_dev_event event,
+		      void *data);
+};
+
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_CORE_EN
+	.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
+	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
+#endif
+};
+
+#define MLX5_IB_MOD "mlx5_ib"
+static int init_one(struct pci_dev *pdev,
+		    const struct pci_device_id *id)
+{
+	struct mlx5_core_dev *dev;
+	struct devlink *devlink;
+	struct mlx5_priv *priv;
+	int err;
+
+	devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+	if (!devlink) {
+		dev_err(&pdev->dev, "devlink_alloc failed\n");
+		return -ENOMEM;
+	}
+
+	dev = devlink_priv(devlink);
+	priv = &dev->priv;
+	priv->pci_dev_data = id->driver_data;
+
+	pci_set_drvdata(pdev, dev);
+
+	dev->pdev = pdev;
+	dev->event = mlx5_core_event;
+	dev->profile = &profile[prof_sel];
+
+	INIT_LIST_HEAD(&priv->ctx_list);
+	spin_lock_init(&priv->ctx_lock);
+	mutex_init(&dev->pci_status_mutex);
+	mutex_init(&dev->intf_state_mutex);
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	err = init_srcu_struct(&priv->pfault_srcu);
+	if (err) {
+		dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n",
+			err);
+		goto clean_dev;
+	}
+#endif
+	mutex_init(&priv->bfregs.reg_head.lock);
+	mutex_init(&priv->bfregs.wc_head.lock);
+	INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
+	INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
+
+	err = mlx5_pci_init(dev, priv);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
+		goto clean_srcu;
+	}
+
+	err = mlx5_health_init(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
+		goto close_pci;
+	}
+
+	mlx5_pagealloc_init(dev);
+
+	err = mlx5_load_one(dev, priv, true);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
+		goto clean_health;
+	}
+
+	err = request_module_nowait(MLX5_IB_MOD);
+	if (err)
+		pr_info("failed to request module %s\n", MLX5_IB_MOD);
+
+	err = devlink_register(devlink, &pdev->dev);
+	if (err)
+		goto clean_load;
+
+	return 0;
+
+clean_load:
+	mlx5_unload_one(dev, priv, true);
+clean_health:
+	mlx5_pagealloc_cleanup(dev);
+	mlx5_health_cleanup(dev);
+close_pci:
+	mlx5_pci_close(dev, priv);
+clean_srcu:
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	cleanup_srcu_struct(&priv->pfault_srcu);
+clean_dev:
+#endif
+	pci_set_drvdata(pdev, NULL);
+	devlink_free(devlink);
+
+	return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct devlink *devlink = priv_to_devlink(dev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	devlink_unregister(devlink);
+	mlx5_unregister_device(dev);
+
+	if (mlx5_unload_one(dev, priv, true)) {
+		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
+		mlx5_health_cleanup(dev);
+		return;
+	}
+
+	mlx5_pagealloc_cleanup(dev);
+	mlx5_health_cleanup(dev);
+	mlx5_pci_close(dev, priv);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+	cleanup_srcu_struct(&priv->pfault_srcu);
+#endif
+	pci_set_drvdata(pdev, NULL);
+	devlink_free(devlink);
+}
+
+static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
+	mlx5_enter_error_state(dev);
+	mlx5_unload_one(dev, priv, false);
+	/* When called from the PCI error handler (state set), save the PCI
+	 * state and drain the health workqueue before disabling the device.
+	 */
+	if (state) {
+		pci_save_state(pdev);
+		mlx5_drain_health_wq(dev);
+		mlx5_pci_disable_device(dev);
+	}
+
+	return state == pci_channel_io_perm_failure ?
+		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
+ */
+static int wait_vital(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_core_health *health = &dev->priv.health;
+	const int niter = 100;
+	u32 last_count = 0;
+	u32 count;
+	int i;
+
+	for (i = 0; i < niter; i++) {
+		count = ioread32be(health->health_counter);
+		if (count && count != 0xffffffff) {
+			if (last_count && last_count != count) {
+				dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+				return 0;
+			}
+			last_count = count;
+		}
+		msleep(50);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	int err;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
+	err = mlx5_pci_enable_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
+			, __func__, err);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+
+	if (wait_vital(pdev)) {
+		dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+	int err;
+
+	dev_info(&pdev->dev, "%s was called\n", __func__);
+
+	err = mlx5_load_one(dev, priv, false);
+	if (err)
+		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
+			, __func__, err);
+	else
+		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
+}
+
+static const struct pci_error_handlers mlx5_err_handler = {
+	.error_detected = mlx5_pci_err_detected,
+	.slot_reset	= mlx5_pci_slot_reset,
+	.resume		= mlx5_pci_resume
+};
+
+static void shutdown(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct mlx5_priv *priv = &dev->priv;
+
+	dev_info(&pdev->dev, "Shutdown was called\n");
+	/* Notify mlx5 clients that the kernel is being shut down */
+	set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+	mlx5_unload_one(dev, priv, false);
+	mlx5_pci_disable_device(dev);
+}
+
+static const struct pci_device_id mlx5_core_pci_table[] = {
+	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
+	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
+	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
+	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1017) },			/* ConnectX-5, PCIe 3.0 */
+	{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5 VF */
+	{ PCI_VDEVICE(MELLANOX, 0x1019) },			/* ConnectX-5, PCIe 4.0 */
+	{ PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},	/* ConnectX-5, PCIe 4.0 VF */
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+
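+/* Internal teardown path: reuse the PCI error flow with state 0 so the
+ * PCI state save, health-wq drain and device disable are skipped.
+ */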
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+	mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+void mlx5_recover_device(struct mlx5_core_dev *dev)
+{
+	mlx5_pci_disable_device(dev);
+	if (mlx5_pci_slot_reset(dev->pdev) == PCI_ERS_RESULT_RECOVERED)
+		mlx5_pci_resume(dev->pdev);
+}
+
+static struct pci_driver mlx5_core_driver = {
+	.name           = DRIVER_NAME,
+	.id_table       = mlx5_core_pci_table,
+	.probe          = init_one,
+	.remove         = remove_one,
+	.shutdown	= shutdown,
+	.err_handler	= &mlx5_err_handler,
+	.sriov_configure   = mlx5_core_sriov_configure,
+};
+
+static void mlx5_core_verify_params(void)
+{
+	if (prof_sel >= ARRAY_SIZE(profile)) {
+		pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
+			prof_sel,
+			ARRAY_SIZE(profile) - 1,
+			MLX5_DEFAULT_PROF);
+		prof_sel = MLX5_DEFAULT_PROF;
+	}
+}
+
+static int __init init(void)
+{
+	int err;
+
+	mlx5_core_verify_params();
+	mlx5_register_debugfs();
+
+	err = pci_register_driver(&mlx5_core_driver);
+	if (err)
+		goto err_debug;
+
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5e_init();
+#endif
+
+	return 0;
+
+err_debug:
+	mlx5_unregister_debugfs();
+	return err;
+}
+
+static void __exit cleanup(void)
+{
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5e_cleanup();
+#endif
+	pci_unregister_driver(&mlx5_core_driver);
+	mlx5_unregister_debugfs();
+}
+
+module_init(init);
+module_exit(cleanup);
diff --git a/drivers/net/ethernet/mellanox/mlx5/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/mlx5.h
new file mode 100644
index 0000000..74241e8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/mlx5.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_CORE_H__
+#define __MLX5_CORE_H__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/if_link.h>
+
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE  "January 2015"
+
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev))
+
+extern uint mlx5_core_debug_mask;
+
+#define mlx5_core_dbg(__dev, format, ...)				\
+	dev_dbg(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,		\
+		 __func__, __LINE__, current->pid,			\
+		 ##__VA_ARGS__)
+
+#define mlx5_core_dbg_mask(__dev, mask, format, ...)			\
+do {									\
+	if ((mask) & mlx5_core_debug_mask)				\
+		mlx5_core_dbg(__dev, format, ##__VA_ARGS__);		\
+} while (0)
+
+#define mlx5_core_err(__dev, format, ...)				\
+	dev_err(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,		\
+		__func__, __LINE__, current->pid,			\
+		##__VA_ARGS__)
+
+#define mlx5_core_warn(__dev, format, ...)				\
+	dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format,	\
+		 __func__, __LINE__, current->pid,			\
+		 ##__VA_ARGS__)
+
+#define mlx5_core_info(__dev, format, ...)				\
+	dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__)
+
+enum {
+	MLX5_CMD_DATA, /* print command payload only */
+	MLX5_CMD_TIME, /* print command execution time */
+};
+
+enum {
+	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
+	MLX5_DRIVER_SYND = 0xbadd00de,
+};
+
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
+int mlx5_query_board_id(struct mlx5_core_dev *dev);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
+int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+		     unsigned long param);
+void mlx5_core_page_fault(struct mlx5_core_dev *dev,
+			  struct mlx5_pagefault *pfault);
+void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+void mlx5_enter_error_state(struct mlx5_core_dev *dev);
+void mlx5_disable_device(struct mlx5_core_dev *dev);
+void mlx5_recover_device(struct mlx5_core_dev *dev);
+int mlx5_sriov_init(struct mlx5_core_dev *dev);
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
+int mlx5_sriov_attach(struct mlx5_core_dev *dev);
+void mlx5_sriov_detach(struct mlx5_core_dev *dev);
+int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
+int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
+int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 *element_id);
+int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+				       void *context, u32 element_id,
+				       u32 modify_bitmask);
+int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
+					u32 element_id);
+int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
+u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+void mlx5_cq_tasklet_cb(unsigned long data);
+
+void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
+
+void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv);
+void mlx5_attach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev);
+bool mlx5_device_registered(struct mlx5_core_dev *dev);
+int mlx5_register_device(struct mlx5_core_dev *dev);
+void mlx5_unregister_device(struct mlx5_core_dev *dev);
+void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol);
+struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
+void mlx5_dev_list_lock(void);
+void mlx5_dev_list_unlock(void);
+int mlx5_dev_list_trylock(void);
+int mlx5_encap_alloc(struct mlx5_core_dev *dev,
+		     int header_type,
+		     size_t size,
+		     void *encap_header,
+		     u32 *encap_id);
+void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
+
+bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
+static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+{
+	/* LACP owner conditions:
+	 * 1) Function is physical.
+	 * 2) LAG is supported by FW.
+	 * 3) LAG is managed by driver (currently the only option).
+	 */
+	return MLX5_CAP_GEN(dev, vport_group_manager) &&
+	       (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+	       MLX5_CAP_GEN(dev, lag_master);
+}
+
+#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/pagealloc.c
new file mode 100644
index 0000000..914616f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/pagealloc.c
@@ -0,0 +1,580 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include "lib/driver.h"
+#include "lib/cmd.h"
+#include "mlx5.h"
+
+enum {
+	MLX5_PAGES_CANT_GIVE	= 0,
+	MLX5_PAGES_GIVE		= 1,
+	MLX5_PAGES_TAKE		= 2
+};
+
+struct mlx5_pages_req {
+	struct mlx5_core_dev *dev;
+	u16	func_id;
+	s32	npages;
+	struct work_struct work;
+};
+
+struct fw_page {
+	struct rb_node		rb_node;
+	u64			addr;
+	struct page	       *page;
+	u16			func_id;
+	unsigned long		bitmask;
+	struct list_head	list;
+	unsigned		free_count;
+};
+
+enum {
+	MAX_RECLAIM_TIME_MSECS	= 5000,
+	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
+};
+
+enum {
+	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
+	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
+};
+
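+/* Track a freshly mapped system page in the rb-tree (keyed by DMA
+ * address) and list it as free with all of its 4K chunks available.
+ */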
+static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
+{
+	struct rb_root *root = &dev->priv.page_root;
+	struct rb_node **new = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct fw_page *nfp;
+	struct fw_page *tfp;
+	int i;
+
+	while (*new) {
+		parent = *new;
+		tfp = rb_entry(parent, struct fw_page, rb_node);
+		if (tfp->addr < addr)
+			new = &parent->rb_left;
+		else if (tfp->addr > addr)
+			new = &parent->rb_right;
+		else
+			return -EEXIST;
+	}
+
+	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
+	if (!nfp)
+		return -ENOMEM;
+
+	nfp->addr = addr;
+	nfp->page = page;
+	nfp->func_id = func_id;
+	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
+	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
+		set_bit(i, &nfp->bitmask);
+
+	rb_link_node(&nfp->rb_node, parent, new);
+	rb_insert_color(&nfp->rb_node, root);
+	list_add(&nfp->list, &dev->priv.free_list);
+
+	return 0;
+}
+
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
+{
+	struct rb_root *root = &dev->priv.page_root;
+	struct rb_node *tmp = root->rb_node;
+	struct fw_page *result = NULL;
+	struct fw_page *tfp;
+
+	while (tmp) {
+		tfp = rb_entry(tmp, struct fw_page, rb_node);
+		if (tfp->addr < addr) {
+			tmp = tmp->rb_left;
+		} else if (tfp->addr > addr) {
+			tmp = tmp->rb_right;
+		} else {
+			result = tfp;
+			break;
+		}
+	}
+
+	return result;
+}
+
+static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
+				s32 *npages, int boot)
+{
+	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_pages_in)]   = {0};
+	int err;
+
+	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
+	MLX5_SET(query_pages_in, in, op_mod, boot ?
+		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
+		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		return err;
+
+	*npages = MLX5_GET(query_pages_out, out, num_pages);
+	*func_id = MLX5_GET(query_pages_out, out, function_id);
+
+	return err;
+}
+
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+{
+	struct fw_page *fp;
+	unsigned n;
+
+	if (list_empty(&dev->priv.free_list))
+		return -ENOMEM;
+
+	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
+	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+	if (n >= MLX5_NUM_4K_IN_PAGE) {
+		mlx5_core_warn(dev, "alloc 4k bug\n");
+		return -ENOENT;
+	}
+	clear_bit(n, &fp->bitmask);
+	fp->free_count--;
+	if (!fp->free_count)
+		list_del(&fp->list);
+
+	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;
+
+	return 0;
+}
+
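+/* Mask a 4K FW page address down to its containing system page */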
+#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+	struct fw_page *fwp;
+	int n;
+
+	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
+	if (!fwp) {
+		mlx5_core_warn(dev, "page not found\n");
+		return;
+	}
+
+	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
+	fwp->free_count++;
+	set_bit(n, &fwp->bitmask);
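+	/* Last chunk freed: release the whole system page. The page sat on
+	 * the free list unless it had no free chunk before this call
+	 * (free_count is now 1, which only happens when PAGE_SIZE == 4K).
+	 */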
+	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
+		rb_erase(&fwp->rb_node, &dev->priv.page_root);
+		if (fwp->free_count != 1)
+			list_del(&fwp->list);
+		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
+			       PAGE_SIZE, DMA_BIDIRECTIONAL);
+		__free_page(fwp->page);
+		kfree(fwp);
+	} else if (fwp->free_count == 1) {
+		list_add(&fwp->list, &dev->priv.free_list);
+	}
+}
+
+static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+{
+	struct page *page;
+	u64 zero_addr = 1;
+	u64 addr;
+	int err;
+	int nid = dev_to_node(&dev->pdev->dev);
+
+	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
+	if (!page) {
+		mlx5_core_warn(dev, "failed to allocate page\n");
+		return -ENOMEM;
+	}
+map:
+	addr = dma_map_page(&dev->pdev->dev, page, 0,
+			    PAGE_SIZE, DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+		mlx5_core_warn(dev, "failed dma mapping page\n");
+		err = -ENOMEM;
+		goto err_mapping;
+	}
+
+	/* Firmware doesn't support page with physical address 0 */
+	if (addr == 0) {
+		zero_addr = addr;
+		goto map;
+	}
+
+	err = insert_page(dev, addr, page, func_id);
+	if (err) {
+		mlx5_core_err(dev, "failed to track allocated page\n");
+		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+	}
+
+err_mapping:
+	if (err)
+		__free_page(page);
+
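+	/* If the first mapping landed at DMA address 0, it was left mapped
+	 * while we remapped (so the retry could not return 0 again); release
+	 * that mapping now.
+	 */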
+	if (zero_addr == 0)
+		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
+			       DMA_BIDIRECTIONAL);
+
+	return err;
+}
+
+static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
+{
+	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)]   = {0};
+	int err;
+
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	if (err)
+		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
+			       func_id, err);
+}
+
+static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
+		      int notify_fail)
+{
+	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
+	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
+	u64 addr;
+	int err;
+	u32 *in;
+	int i;
+
+	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
+	in = mlx5_vzalloc(inlen);
+	if (!in) {
+		err = -ENOMEM;
+		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
+		goto out_free;
+	}
+
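+	/* Carve 4K chunks out of cached system pages; when none are free,
+	 * allocate and DMA-map a fresh system page and retry.
+	 */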
+	for (i = 0; i < npages; i++) {
+retry:
+		err = alloc_4k(dev, &addr);
+		if (err) {
+			if (err == -ENOMEM)
+				err = alloc_system_page(dev, func_id);
+			if (err)
+				goto out_4k;
+
+			goto retry;
+		}
+		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
+	}
+
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	if (err) {
+		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+			       func_id, npages, err);
+		goto out_4k;
+	}
+
+	dev->priv.fw_pages += npages;
+	if (func_id)
+		dev->priv.vfs_pages += npages;
+
+	mlx5_core_dbg(dev, "err %d\n", err);
+
+	kvfree(in);
+	return 0;
+
+out_4k:
+	for (i--; i >= 0; i--)
+		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
+out_free:
+	kvfree(in);
+	if (notify_fail)
+		page_notify_fail(dev, func_id);
+	return err;
+}
+
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+			     u32 *in, int in_size, u32 *out, int out_size)
+{
+	struct fw_page *fwp;
+	struct rb_node *p;
+	u32 func_id;
+	u32 npages;
+	u32 i = 0;
+
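+	/* In internal error state FW commands cannot complete, so satisfy
+	 * the reclaim locally from the driver's page tracking tree.
+	 */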
+	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		return mlx5_cmd_exec(dev, in, in_size, out, out_size);
+
+	/* No hard feelings, we want our pages back! */
+	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
+	func_id = MLX5_GET(manage_pages_in, in, function_id);
+
+	p = rb_first(&dev->priv.page_root);
+	while (p && i < npages) {
+		fwp = rb_entry(p, struct fw_page, rb_node);
+		p = rb_next(p);
+		if (fwp->func_id != func_id)
+			continue;
+
+		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
+		i++;
+	}
+
+	MLX5_SET(manage_pages_out, out, output_num_entries, i);
+	return 0;
+}
+
+static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
+			 int *nclaimed)
+{
+	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
+	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
+	int num_claimed;
+	u32 *out;
+	int err;
+	int i;
+
+	if (nclaimed)
+		*nclaimed = 0;
+
+	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
+	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
+	MLX5_SET(manage_pages_in, in, function_id, func_id);
+	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
+
+	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
+	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
+	if (err) {
+		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
+		goto out_free;
+	}
+
+	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
+	if (num_claimed > npages) {
+		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
+			       num_claimed, npages);
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	for (i = 0; i < num_claimed; i++)
+		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));
+
+	if (nclaimed)
+		*nclaimed = num_claimed;
+
+	dev->priv.fw_pages -= num_claimed;
+	if (func_id)
+		dev->priv.vfs_pages -= num_claimed;
+
+out_free:
+	kvfree(out);
+	return err;
+}
+
+static void pages_work_handler(struct work_struct *work)
+{
+	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
+	struct mlx5_core_dev *dev = req->dev;
+	int err = 0;
+
+	if (req->npages < 0)
+		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
+	else if (req->npages > 0)
+		err = give_pages(dev, req->func_id, req->npages, 1);
+
+	if (err)
+		mlx5_core_warn(dev, "%s fail %d\n",
+			       req->npages < 0 ? "reclaim" : "give", err);
+
+	kfree(req);
+}
+
+void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
+				 s32 npages)
+{
+	struct mlx5_pages_req *req;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req) {
+		mlx5_core_warn(dev, "failed to allocate pages request\n");
+		return;
+	}
+
+	req->dev = dev;
+	req->func_id = func_id;
+	req->npages = npages;
+	INIT_WORK(&req->work, pages_work_handler);
+	queue_work(dev->priv.pg_wq, &req->work);
+}
+
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
+{
+	u16 uninitialized_var(func_id);
+	s32 uninitialized_var(npages);
+	int err;
+
+	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
+	if (err)
+		return err;
+
+	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+		      npages, boot ? "boot" : "init", func_id);
+
+	return give_pages(dev, func_id, npages, 0);
+}
+
+enum {
+	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
+};
+
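+/* How many page addresses fit in one reclaim command: the immediate
+ * output area plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox data blocks,
+ * minus the fixed manage_pages output header.
+ */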
+static int optimal_reclaimed_pages(void)
+{
+	struct mlx5_cmd_prot_block *block;
+	struct mlx5_cmd_layout *lay;
+	int ret;
+
+	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
+	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
+	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
+
+	return ret;
+}
+
+int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+	struct fw_page *fwp;
+	struct rb_node *p;
+	int nclaimed = 0;
+	int err = 0;
+
+	do {
+		p = rb_first(&dev->priv.page_root);
+		if (p) {
+			fwp = rb_entry(p, struct fw_page, rb_node);
+			err = reclaim_pages(dev, fwp->func_id,
+					    optimal_reclaimed_pages(),
+					    &nclaimed);
+
+			if (err) {
+				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+					       err);
+				return err;
+			}
+			if (nclaimed)
+				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+		}
+		if (time_after(jiffies, end)) {
+			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
+			break;
+		}
+	} while (p);
+
+	WARN(dev->priv.fw_pages,
+	     "FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.fw_pages);
+	WARN(dev->priv.vfs_pages,
+	     "VFs FW pages counter is %d after reclaiming all pages\n",
+	     dev->priv.vfs_pages);
+
+	return 0;
+}
+
+void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
+{
+	dev->priv.page_root = RB_ROOT;
+	INIT_LIST_HEAD(&dev->priv.free_list);
+}
+
+void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
+{
+	/* nothing */
+}
+
+int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
+{
+	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
+	if (!dev->priv.pg_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
+{
+	destroy_workqueue(dev->priv.pg_wq);
+}
+
+int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+	int prev_vfs_pages = dev->priv.vfs_pages;
+
+	/* In case of internal error we will free the pages manually later */
+	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
+		return 0;
+	}
+
+	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
+		      dev->priv.name);
+	while (dev->priv.vfs_pages) {
+		if (time_after(jiffies, end)) {
+			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
+			return -ETIMEDOUT;
+		}
+		if (dev->priv.vfs_pages < prev_vfs_pages) {
+			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+			prev_vfs_pages = dev->priv.vfs_pages;
+		}
+		msleep(50);
+	}
+
+	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
+	return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.c
new file mode 100644
index 0000000..378fd34
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.c
@@ -0,0 +1,2133 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "lib/driver.h"
+#include "lib/mlx5_ifc.h"
+#include "lib/vport.h"
+#include "lib/fs.h"
+#include "mlx5.h"
+#include "sriov/eswitch.h"
+
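+/* Vport number the FW uses to address the uplink (physical) port */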
+#define UPLINK_VPORT 0xFFFF
+
+enum {
+	MLX5_ACTION_NONE = 0,
+	MLX5_ACTION_ADD  = 1,
+	MLX5_ACTION_DEL  = 2,
+};
+
+/* E-Switch UC L2 table hash node */
+struct esw_uc_addr {
+	struct l2addr_node node;
+	u32                table_index;
+	u32                vport;
+};
+
+/* E-Switch MC FDB table hash node */
+struct esw_mc_addr { /* SRIOV only */
+	struct l2addr_node     node;
+	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
+	u32                    refcnt;
+};
+
+/* Vport UC/MC hash node */
+struct vport_addr {
+	struct l2addr_node     node;
+	u8                     action;
+	u32                    vport;
+	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
+	/* A flag indicating that mac was added due to mc promiscuous vport */
+	bool mc_promisc;
+};
+
+enum {
+	UC_ADDR_CHANGE = BIT(0),
+	MC_ADDR_CHANGE = BIT(1),
+	PROMISC_CHANGE = BIT(3),
+};
+
+/* Vport context events */
+#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
+			    MC_ADDR_CHANGE | \
+			    PROMISC_CHANGE)
+
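+/* (Re)arm the NIC vport context change event for a vport and select
+ * which UC/MC/promisc changes trigger it; the event is one-shot and is
+ * re-armed after each notification is handled.
+ */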
+static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
+					u32 events_mask)
+{
+	int in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)]   = {0};
+	int out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
+	void *nic_vport_ctx;
+
+	MLX5_SET(modify_nic_vport_context_in, in,
+		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
+	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
+	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
+				     in, nic_vport_context);
+
+	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
+
+	if (events_mask & UC_ADDR_CHANGE)
+		MLX5_SET(nic_vport_context, nic_vport_ctx,
+			 event_on_uc_address_change, 1);
+	if (events_mask & MC_ADDR_CHANGE)
+		MLX5_SET(nic_vport_context, nic_vport_ctx,
+			 event_on_mc_address_change, 1);
+	if (events_mask & PROMISC_CHANGE)
+		MLX5_SET(nic_vport_context, nic_vport_ctx,
+			 event_on_promisc_change, 1);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+/* E-Switch vport context HW commands */
+static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
+					void *in, int inlen)
+{
+	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
+
+	MLX5_SET(modify_esw_vport_context_in, in, opcode,
+		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
+	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+}
+
+static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
+				  u16 vlan, u8 qos, u8 set_flags)
+{
+	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};
+
+	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
+	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
+		return -EOPNOTSUPP;
+
+	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
+		  vport, vlan, qos, set_flags);
+
+	if (set_flags & SET_VLAN_STRIP)
+		MLX5_SET(modify_esw_vport_context_in, in,
+			 esw_vport_context.vport_cvlan_strip, 1);
+
+	if (set_flags & SET_VLAN_INSERT) {
+		/* insert only if no vlan in packet */
+		MLX5_SET(modify_esw_vport_context_in, in,
+			 esw_vport_context.vport_cvlan_insert, 1);
+
+		MLX5_SET(modify_esw_vport_context_in, in,
+			 esw_vport_context.cvlan_pcp, qos);
+		MLX5_SET(modify_esw_vport_context_in, in,
+			 esw_vport_context.cvlan_id, vlan);
+	}
+
+	MLX5_SET(modify_esw_vport_context_in, in,
+		 field_select.vport_cvlan_strip, 1);
+	MLX5_SET(modify_esw_vport_context_in, in,
+		 field_select.vport_cvlan_insert, 1);
+
+	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+}
+
+/* HW L2 Table (MPFS) management */
+static int set_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index,
+				  u8 *mac, u8 vlan_valid, u16 vlan)
+{
+	u32 in[MLX5_ST_SZ_DW(set_l2_table_entry_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(set_l2_table_entry_out)] = {0};
+	u8 *in_mac_addr;
+
+	MLX5_SET(set_l2_table_entry_in, in, opcode,
+		 MLX5_CMD_OP_SET_L2_TABLE_ENTRY);
+	MLX5_SET(set_l2_table_entry_in, in, table_index, index);
+	MLX5_SET(set_l2_table_entry_in, in, vlan_valid, vlan_valid);
+	MLX5_SET(set_l2_table_entry_in, in, vlan, vlan);
+
+	in_mac_addr = MLX5_ADDR_OF(set_l2_table_entry_in, in, mac_address);
+	ether_addr_copy(&in_mac_addr[2], mac);
+
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int del_l2_table_entry_cmd(struct mlx5_core_dev *dev, u32 index)
+{
+	u32 in[MLX5_ST_SZ_DW(delete_l2_table_entry_in)]   = {0};
+	u32 out[MLX5_ST_SZ_DW(delete_l2_table_entry_out)] = {0};
+
+	MLX5_SET(delete_l2_table_entry_in, in, opcode,
+		 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
+	MLX5_SET(delete_l2_table_entry_in, in, table_index, index);
+	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int alloc_l2_table_index(struct mlx5_l2_table *l2_table, u32 *ix)
+{
+	int err = 0;
+
+	*ix = find_first_zero_bit(l2_table->bitmap, l2_table->size);
+	if (*ix >= l2_table->size)
+		err = -ENOSPC;
+	else
+		__set_bit(*ix, l2_table->bitmap);
+
+	return err;
+}
+
+static void free_l2_table_index(struct mlx5_l2_table *l2_table, u32 ix)
+{
+	__clear_bit(ix, l2_table->bitmap);
+}
+
+static int set_l2_table_entry(struct mlx5_core_dev *dev, u8 *mac,
+			      u8 vlan_valid, u16 vlan,
+			      u32 *index)
+{
+	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
+	int err;
+
+	err = alloc_l2_table_index(l2_table, index);
+	if (err)
+		return err;
+
+	err = set_l2_table_entry_cmd(dev, *index, mac, vlan_valid, vlan);
+	if (err)
+		free_l2_table_index(l2_table, *index);
+
+	return err;
+}
+
+static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
+{
+	struct mlx5_l2_table *l2_table = &dev->priv.eswitch->l2_table;
+
+	del_l2_table_entry_cmd(dev, index);
+	free_l2_table_index(l2_table, index);
+}
+
+/* E-Switch FDB */
+static struct mlx5_flow_handle *
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
+{
+	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+			    MLX5_MATCH_OUTER_HEADERS);
+	struct mlx5_flow_handle *flow_rule = NULL;
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_spec *spec;
+	void *mv_misc = NULL;
+	void *mc_misc = NULL;
+	u8 *dmac_v = NULL;
+	u8 *dmac_c = NULL;
+
+	if (rx_rule)
+		match_header |= MLX5_MATCH_MISC_PARAMETERS;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+		return NULL;
+	}
+	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+			      outer_headers.dmac_47_16);
+	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+			      outer_headers.dmac_47_16);
+
+	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+		ether_addr_copy(dmac_v, mac_v);
+		ether_addr_copy(dmac_c, mac_c);
+	}
+
+	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+		mv_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+					misc_parameters);
+		mc_misc  = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+					misc_parameters);
+		MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+	}
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport_num = vport;
+
+	esw_debug(esw->dev,
+		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
+		  dmac_v, dmac_c, vport);
+	spec->match_criteria_enable = match_header;
+	flow_act.action =  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule =
+		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+				    &flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		esw_warn(esw->dev,
+			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
+			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
+		flow_rule = NULL;
+	}
+
+	kvfree(spec);
+	return flow_rule;
+}
+
+static struct mlx5_flow_handle *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+
+	eth_broadcast_addr(mac_c);
+	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_handle *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+	u8 mac_v[ETH_ALEN];
+
+	eth_zero_addr(mac_c);
+	eth_zero_addr(mac_v);
+	mac_c[0] = 0x01;
+	mac_v[0] = 0x01;
+	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
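+/* Promisc rule: no L2 match at all; with rx_rule set, the rule matches
+ * only on misc source_port == UPLINK_VPORT, i.e. all traffic arriving
+ * from the uplink is forwarded to the vport.
+ */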
+static struct mlx5_flow_handle *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+	u8 mac_c[ETH_ALEN];
+	u8 mac_v[ETH_ALEN];
+
+	eth_zero_addr(mac_c);
+	eth_zero_addr(mac_v);
+	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *fdb;
+	struct mlx5_flow_group *g;
+	void *match_criteria;
+	int table_size;
+	u32 *flow_group_in;
+	u8 *dmac;
+	int err = 0;
+
+	esw_debug(dev, "Create FDB log_max_size(%d)\n",
+		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		return -ENOMEM;
+	}
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
+		goto out;
+	}
+	esw->fdb_table.fdb = fdb;
+
+	/* Addresses group : Full match unicast/multicast addresses */
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_OUTER_HEADERS);
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	/* Preserve 2 entries for allmulti and promisc rules */
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
+	eth_broadcast_addr(dmac);
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
+		goto out;
+	}
+	esw->fdb_table.legacy.addr_grp = g;
+
+	/* Allmulti group : One rule that forwards any mcast traffic */
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+	eth_zero_addr(dmac);
+	dmac[0] = 0x01;
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+		goto out;
+	}
+	esw->fdb_table.legacy.allmulti_grp = g;
+
+	/* Promiscuous group :
+	 * One rule that forward all unmatched traffic from previous groups
+	 */
+	eth_zero_addr(dmac);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_MISC_PARAMETERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+		goto out;
+	}
+	esw->fdb_table.legacy.promisc_grp = g;
+
+out:
+	if (err) {
+		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
+			mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+			esw->fdb_table.legacy.allmulti_grp = NULL;
+		}
+		if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
+			mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+			esw->fdb_table.legacy.addr_grp = NULL;
+		}
+		if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+			mlx5_destroy_flow_table(esw->fdb_table.fdb);
+			esw->fdb_table.fdb = NULL;
+		}
+	}
+
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
+{
+	if (!esw->fdb_table.fdb)
+		return;
+
+	esw_debug(esw->dev, "Destroy FDB Table\n");
+	mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+	esw->fdb_table.fdb = NULL;
+	esw->fdb_table.legacy.addr_grp = NULL;
+	esw->fdb_table.legacy.allmulti_grp = NULL;
+	esw->fdb_table.legacy.promisc_grp = NULL;
+}
+
+/* E-Switch vport UC/MC lists management */
+typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
+				 struct vport_addr *vaddr);
+
+static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
+{
+	struct hlist_head *hash = esw->l2_table.l2_hash;
+	struct esw_uc_addr *esw_uc;
+	u8 *mac = vaddr->node.addr;
+	u32 vport = vaddr->vport;
+	int err;
+
+	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
+	if (esw_uc) {
+		esw_warn(esw->dev,
+			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
+			 mac, vport, esw_uc->vport);
+		return -EEXIST;
+	}
+
+	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
+	if (!esw_uc)
+		return -ENOMEM;
+	esw_uc->vport = vport;
+
+	err = set_l2_table_entry(esw->dev, mac, 0, 0, &esw_uc->table_index);
+	if (err)
+		goto abort;
+
+	/* SRIOV is enabled: Forward UC MAC to vport */
+	if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
+		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
+
+	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
+		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
+	return err;
+abort:
+	l2addr_hash_del(esw_uc);
+	return err;
+}
+
+static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
+{
+	struct hlist_head *hash = esw->l2_table.l2_hash;
+	struct esw_uc_addr *esw_uc;
+	u8 *mac = vaddr->node.addr;
+	u32 vport = vaddr->vport;
+
+	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
+	if (!esw_uc || esw_uc->vport != vport) {
+		esw_debug(esw->dev,
+			  "MAC(%pM) doesn't belong to vport (%d)\n",
+			  mac, vport);
+		return -EINVAL;
+	}
+	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
+		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
+
+	del_l2_table_entry(esw->dev, esw_uc->table_index);
+
+	if (vaddr->flow_rule)
+		mlx5_del_flow_rules(vaddr->flow_rule);
+	vaddr->flow_rule = NULL;
+
+	l2addr_hash_del(esw_uc);
+	return 0;
+}
+
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+				   struct vport_addr *vaddr,
+				   struct esw_mc_addr *esw_mc)
+{
+	u8 *mac = vaddr->node.addr;
+	u32 vport_idx = 0;
+
+	for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+		struct mlx5_vport *vport = &esw->vports[vport_idx];
+		struct hlist_head *vport_hash = vport->mc_list;
+		struct vport_addr *iter_vaddr =
+					l2addr_hash_find(vport_hash,
+							 mac,
+							 struct vport_addr);
+		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+		    vaddr->vport == vport_idx)
+			continue;
+		switch (vaddr->action) {
+		case MLX5_ACTION_ADD:
+			if (iter_vaddr)
+				continue;
+			iter_vaddr = l2addr_hash_add(vport_hash, mac,
+						     struct vport_addr,
+						     GFP_KERNEL);
+			if (!iter_vaddr) {
+				esw_warn(esw->dev,
+					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+					 mac, vport_idx);
+				continue;
+			}
+			iter_vaddr->vport = vport_idx;
+			iter_vaddr->flow_rule =
+					esw_fdb_set_vport_rule(esw,
+							       mac,
+							       vport_idx);
+			iter_vaddr->mc_promisc = true;
+			break;
+		case MLX5_ACTION_DEL:
+			if (!iter_vaddr)
+				continue;
+			mlx5_del_flow_rules(iter_vaddr->flow_rule);
+			l2addr_hash_del(iter_vaddr);
+			break;
+		}
+	}
+}
+
+static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
+{
+	struct hlist_head *hash = esw->mc_table;
+	struct esw_mc_addr *esw_mc;
+	u8 *mac = vaddr->node.addr;
+	u32 vport = vaddr->vport;
+
+	if (!esw->fdb_table.fdb)
+		return 0;
+
+	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
+	if (esw_mc)
+		goto add;
+
+	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
+	if (!esw_mc)
+		return -ENOMEM;
+
+	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
+		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+	/* Add this multicast mac to all the mc promiscuous vports */
+	update_allmulti_vports(esw, vaddr, esw_mc);
+
+add:
+	/* If the multicast mac is added as a result of mc promiscuous vport,
+	 * don't increment the multicast ref count
+	 */
+	if (!vaddr->mc_promisc)
+		esw_mc->refcnt++;
+
+	/* Forward MC MAC to vport */
+	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
+	esw_debug(esw->dev,
+		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
+		  vport, mac, vaddr->flow_rule,
+		  esw_mc->refcnt, esw_mc->uplink_rule);
+	return 0;
+}
+
+static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
+{
+	struct hlist_head *hash = esw->mc_table;
+	struct esw_mc_addr *esw_mc;
+	u8 *mac = vaddr->node.addr;
+	u32 vport = vaddr->vport;
+
+	if (!esw->fdb_table.fdb)
+		return 0;
+
+	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
+	if (!esw_mc) {
+		esw_warn(esw->dev,
+			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
+			 mac, vport);
+		return -EINVAL;
+	}
+	esw_debug(esw->dev,
+		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
+		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
+		  esw_mc->uplink_rule);
+
+	if (vaddr->flow_rule)
+		mlx5_del_flow_rules(vaddr->flow_rule);
+	vaddr->flow_rule = NULL;
+
+	/* If the multicast mac is added as a result of mc promiscuous vport,
+	 * don't decrement the multicast ref count.
+	 */
+	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
+		return 0;
+
+	/* Remove this multicast mac from all the mc promiscuous vports */
+	update_allmulti_vports(esw, vaddr, esw_mc);
+
+	if (esw_mc->uplink_rule)
+		mlx5_del_flow_rules(esw_mc->uplink_rule);
+
+	l2addr_hash_del(esw_mc);
+	return 0;
+}
+
+/* Apply vport UC/MC list to HW l2 table and FDB table */
+static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
+				      u32 vport_num, int list_type)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
+	vport_addr_action vport_addr_add;
+	vport_addr_action vport_addr_del;
+	struct vport_addr *addr;
+	struct l2addr_node *node;
+	struct hlist_head *hash;
+	struct hlist_node *tmp;
+	int hi;
+
+	vport_addr_add = is_uc ? esw_add_uc_addr :
+				 esw_add_mc_addr;
+	vport_addr_del = is_uc ? esw_del_uc_addr :
+				 esw_del_mc_addr;
+
+	hash = is_uc ? vport->uc_list : vport->mc_list;
+	for_each_l2hash_node(node, tmp, hash, hi) {
+		addr = container_of(node, struct vport_addr, node);
+		switch (addr->action) {
+		case MLX5_ACTION_ADD:
+			vport_addr_add(esw, addr);
+			addr->action = MLX5_ACTION_NONE;
+			break;
+		case MLX5_ACTION_DEL:
+			vport_addr_del(esw, addr);
+			l2addr_hash_del(addr);
+			break;
+		}
+	}
+}
+
+/* Sync vport UC/MC list from vport context */
+static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
+				       u32 vport_num, int list_type)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
+	u8 (*mac_list)[ETH_ALEN];
+	struct l2addr_node *node;
+	struct vport_addr *addr;
+	struct hlist_head *hash;
+	struct hlist_node *tmp;
+	int size;
+	int err;
+	int hi;
+	int i;
+
+	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
+		       MLX5_MAX_MC_PER_VPORT(esw->dev);
+
+	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
+	if (!mac_list)
+		return;
+
+	hash = is_uc ? vport->uc_list : vport->mc_list;
+
+	for_each_l2hash_node(node, tmp, hash, hi) {
+		addr = container_of(node, struct vport_addr, node);
+		addr->action = MLX5_ACTION_DEL;
+	}
+
+	if (!vport->enabled)
+		goto out;
+
+	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
+					    mac_list, &size);
+	if (err)
+		goto out;
+	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
+		  vport_num, is_uc ? "UC" : "MC", size);
+
+	for (i = 0; i < size; i++) {
+		if (is_uc && !is_valid_ether_addr(mac_list[i]))
+			continue;
+
+		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
+			continue;
+
+		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
+		if (addr) {
+			addr->action = MLX5_ACTION_NONE;
+			/* If this mac was previously added because of allmulti
+			 * promiscuous rx mode, it is now converted back into an
+			 * original vport mac.
+			 */
+			if (addr->mc_promisc) {
+				struct esw_mc_addr *esw_mc =
+					l2addr_hash_find(esw->mc_table,
+							 mac_list[i],
+							 struct esw_mc_addr);
+				if (!esw_mc) {
+					esw_warn(esw->dev,
+						 "Failed to MAC(%pM) in mcast DB\n",
+						 mac_list[i]);
+					continue;
+				}
+				esw_mc->refcnt++;
+				addr->mc_promisc = false;
+			}
+			continue;
+		}
+
+		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
+				       GFP_KERNEL);
+		if (!addr) {
+			esw_warn(esw->dev,
+				 "Failed to add MAC(%pM) to vport[%d] DB\n",
+				 mac_list[i], vport_num);
+			continue;
+		}
+		addr->vport = vport_num;
+		addr->action = MLX5_ACTION_ADD;
+	}
+out:
+	kfree(mac_list);
+}
+
+/* Populate the vport's MC list with every multicast address known to the
+ * e-switch, since the vport is in allmulti (mc promiscuous) mode.
+ * Must be called after esw_update_vport_addr_list.
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct l2addr_node *node;
+	struct vport_addr *addr;
+	struct hlist_head *hash;
+	struct hlist_node *tmp;
+	int hi;
+
+	hash = vport->mc_list;
+
+	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+		u8 *mac = node->addr;
+
+		addr = l2addr_hash_find(hash, mac, struct vport_addr);
+		if (addr) {
+			if (addr->action == MLX5_ACTION_DEL)
+				addr->action = MLX5_ACTION_NONE;
+			continue;
+		}
+		addr = l2addr_hash_add(hash, mac, struct vport_addr,
+				       GFP_KERNEL);
+		if (!addr) {
+			esw_warn(esw->dev,
+				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+				 mac, vport_num);
+			continue;
+		}
+		addr->vport = vport_num;
+		addr->action = MLX5_ACTION_ADD;
+		addr->mc_promisc = true;
+	}
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+				    bool promisc, bool mc_promisc)
+{
+	struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+
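+	/* An allmulti rule exists exactly when mc_promisc was last applied;
+	 * if the requested state already matches, skip to the promisc part.
+	 */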
+	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+		goto promisc;
+
+	if (mc_promisc) {
+		vport->allmulti_rule =
+				esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+		if (!allmulti_addr->uplink_rule)
+			allmulti_addr->uplink_rule =
+				esw_fdb_set_vport_allmulti_rule(esw,
+								UPLINK_VPORT);
+		allmulti_addr->refcnt++;
+	} else if (vport->allmulti_rule) {
+		mlx5_del_flow_rules(vport->allmulti_rule);
+		vport->allmulti_rule = NULL;
+
+		if (--allmulti_addr->refcnt > 0)
+			goto promisc;
+
+		if (allmulti_addr->uplink_rule)
+			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
+		allmulti_addr->uplink_rule = NULL;
+	}
+
+promisc:
+	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+		return;
+
+	if (promisc) {
+		vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+								     vport_num);
+	} else if (vport->promisc_rule) {
+		mlx5_del_flow_rules(vport->promisc_rule);
+		vport->promisc_rule = NULL;
+	}
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	int promisc_all = 0;
+	int promisc_uc = 0;
+	int promisc_mc = 0;
+	int err;
+
+	err = mlx5_query_nic_vport_promisc(esw->dev,
+					   vport_num,
+					   &promisc_uc,
+					   &promisc_mc,
+					   &promisc_all);
+	if (err)
+		return;
+	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+		  vport_num, promisc_all, promisc_mc);
+
+	if (!vport->info.trusted || !vport->enabled) {
+		promisc_uc = 0;
+		promisc_mc = 0;
+		promisc_all = 0;
+	}
+
+	esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+				(promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
+{
+	struct mlx5_core_dev *dev = vport->dev;
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	u8 mac[ETH_ALEN];
+
+	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
+	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
+		  vport->vport, mac);
+
+	if (vport->enabled_events & UC_ADDR_CHANGE) {
+		esw_update_vport_addr_list(esw, vport->vport,
+					   MLX5_NVPRT_LIST_TYPE_UC);
+		esw_apply_vport_addr_list(esw, vport->vport,
+					  MLX5_NVPRT_LIST_TYPE_UC);
+	}
+
+	if (vport->enabled_events & MC_ADDR_CHANGE) {
+		esw_update_vport_addr_list(esw, vport->vport,
+					   MLX5_NVPRT_LIST_TYPE_MC);
+	}
+
+	if (vport->enabled_events & PROMISC_CHANGE) {
+		esw_update_vport_rx_mode(esw, vport->vport);
+		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+			esw_update_vport_mc_promisc(esw, vport->vport);
+	}
+
+	if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
+		esw_apply_vport_addr_list(esw, vport->vport,
+					  MLX5_NVPRT_LIST_TYPE_MC);
+	}
+
+	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
+	if (vport->enabled)
+		arm_vport_context_events_cmd(dev, vport->vport,
+					     vport->enabled_events);
+}
+
+static void esw_vport_change_handler(struct work_struct *work)
+{
+	struct mlx5_vport *vport =
+		container_of(work, struct mlx5_vport, vport_change_handler);
+	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+	mutex_lock(&esw->state_lock);
+	esw_vport_change_handle_locked(vport);
+	mutex_unlock(&esw->state_lock);
+}
+
+static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+				       struct mlx5_vport *vport)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *vlan_grp = NULL;
+	struct mlx5_flow_group *drop_grp = NULL;
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *acl;
+	void *match_criteria;
+	u32 *flow_group_in;
+	/* The egress acl table contains 2 rules:
+	 * 1)Allow traffic with vlan_tag=vst_vlan_id
+	 * 2)Drop all other traffic.
+	 */
+	int table_size = 2;
+	int err = 0;
+
+	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
+		return -EOPNOTSUPP;
+
+	if (!IS_ERR_OR_NULL(vport->egress.acl))
+		return 0;
+
+	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		return -EIO;
+	}
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+	if (IS_ERR(acl)) {
+		err = PTR_ERR(acl);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(vlan_grp)) {
+		err = PTR_ERR(vlan_grp);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(drop_grp)) {
+		err = PTR_ERR(drop_grp);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	vport->egress.acl = acl;
+	vport->egress.drop_grp = drop_grp;
+	vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+	kvfree(flow_group_in);
+	if (err && !IS_ERR_OR_NULL(vlan_grp))
+		mlx5_destroy_flow_group(vlan_grp);
+	if (err && !IS_ERR_OR_NULL(acl))
+		mlx5_destroy_flow_table(acl);
+	return err;
+}
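
A condensed sketch of how the two groups created above line up with the rules that esw_vport_egress_config() installs later (semantics inferred from this file, sketched as a comment):

	/* egress ACL, table_size = 2:
	 *   flow index 0, vlan_grp: match vlan_tag + first_vid -> ALLOW
	 *   flow index 1, drop_grp: empty match (catch-all)    -> DROP
	 */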
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+					   struct mlx5_vport *vport)
+{
+	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+		mlx5_del_flow_rules(vport->egress.allowed_vlan);
+
+	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+		mlx5_del_flow_rules(vport->egress.drop_rule);
+
+	vport->egress.allowed_vlan = NULL;
+	vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+					 struct mlx5_vport *vport)
+{
+	if (IS_ERR_OR_NULL(vport->egress.acl))
+		return;
+
+	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+	esw_vport_cleanup_egress_rules(esw, vport);
+	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+	mlx5_destroy_flow_group(vport->egress.drop_grp);
+	mlx5_destroy_flow_table(vport->egress.acl);
+	vport->egress.allowed_vlans_grp = NULL;
+	vport->egress.drop_grp = NULL;
+	vport->egress.acl = NULL;
+}
+
+static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+					struct mlx5_vport *vport)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *g;
+	void *match_criteria;
+	u32 *flow_group_in;
+	/* The ingress ACL table contains 4 groups
+	 * (2 active rules at the same time -
+	 *      1 allow rule from one of the first 3 groups.
+	 *      1 drop rule from the last group):
+	 * 1) Allow untagged traffic with smac=original mac.
+	 * 2) Allow untagged traffic.
+	 * 3) Allow traffic with smac=original mac.
+	 * 4) Drop all other traffic.
+	 */
+	int table_size = 4;
+	int err = 0;
+
+	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+		return -EOPNOTSUPP;
+
+	if (!IS_ERR_OR_NULL(vport->ingress.acl))
+		return 0;
+
+	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		return -EIO;
+	}
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+	if (IS_ERR(acl)) {
+		err = PTR_ERR(acl);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.acl = acl;
+
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_untagged_spoofchk_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_untagged_only_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.allow_spoofchk_only_grp = g;
+
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+	g = mlx5_create_flow_group(acl, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+	vport->ingress.drop_grp = g;
+
+out:
+	if (err) {
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+			mlx5_destroy_flow_group(
+					vport->ingress.allow_spoofchk_only_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+			mlx5_destroy_flow_group(
+					vport->ingress.allow_untagged_only_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+			mlx5_destroy_flow_group(
+				vport->ingress.allow_untagged_spoofchk_grp);
+		if (!IS_ERR_OR_NULL(vport->ingress.acl))
+			mlx5_destroy_flow_table(vport->ingress.acl);
+	}
+
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+					    struct mlx5_vport *vport)
+{
+	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+		mlx5_del_flow_rules(vport->ingress.drop_rule);
+
+	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+		mlx5_del_flow_rules(vport->ingress.allow_rule);
+
+	vport->ingress.drop_rule = NULL;
+	vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+					  struct mlx5_vport *vport)
+{
+	if (IS_ERR_OR_NULL(vport->ingress.acl))
+		return;
+
+	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+	esw_vport_cleanup_ingress_rules(esw, vport);
+	mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+	mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+	mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+	mlx5_destroy_flow_group(vport->ingress.drop_grp);
+	mlx5_destroy_flow_table(vport->ingress.acl);
+	vport->ingress.acl = NULL;
+	vport->ingress.drop_grp = NULL;
+	vport->ingress.allow_spoofchk_only_grp = NULL;
+	vport->ingress.allow_untagged_only_grp = NULL;
+	vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport)
+{
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+	u8 *smac_v;
+
+	if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) {
+		mlx5_core_warn(esw->dev,
+			       "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+			       vport->vport);
+		return -EPERM;
+	}
+
+	esw_vport_cleanup_ingress_rules(esw, vport);
+
+	if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
+		esw_vport_disable_ingress_acl(esw, vport);
+		return 0;
+	}
+
+	err = esw_vport_enable_ingress_acl(esw, vport);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "failed to enable ingress acl (%d) on vport[%d]\n",
+			       err, vport->vport);
+		return err;
+	}
+
+	esw_debug(esw->dev,
+		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+		  vport->vport, vport->info.vlan, vport->info.qos);
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		err = -ENOMEM;
+		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	if (vport->info.vlan || vport->info.qos)
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
+	if (vport->info.spoofchk) {
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+		smac_v = MLX5_ADDR_OF(fte_match_param,
+				      spec->match_value,
+				      outer_headers.smac_47_16);
+		ether_addr_copy(smac_v, vport->info.mac);
+	}
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+	vport->ingress.allow_rule =
+		mlx5_add_flow_rules(vport->ingress.acl, spec,
+				    &flow_act, NULL, 0);
+	if (IS_ERR(vport->ingress.allow_rule)) {
+		err = PTR_ERR(vport->ingress.allow_rule);
+		esw_warn(esw->dev,
+			 "vport[%d] configure ingress allow rule, err(%d)\n",
+			 vport->vport, err);
+		vport->ingress.allow_rule = NULL;
+		goto out;
+	}
+
+	memset(spec, 0, sizeof(*spec));
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	vport->ingress.drop_rule =
+		mlx5_add_flow_rules(vport->ingress.acl, spec,
+				    &flow_act, NULL, 0);
+	if (IS_ERR(vport->ingress.drop_rule)) {
+		err = PTR_ERR(vport->ingress.drop_rule);
+		esw_warn(esw->dev,
+			 "vport[%d] configure ingress drop rule, err(%d)\n",
+			 vport->vport, err);
+		vport->ingress.drop_rule = NULL;
+		goto out;
+	}
+
+out:
+	if (err)
+		esw_vport_cleanup_ingress_rules(esw, vport);
+	kvfree(spec);
+	return err;
+}
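
One subtlety worth calling out here: match_criteria is the mask and match_value holds the expected bits, so enabling a criteria field while leaving its value zeroed matches "field == 0". A hedged mini-example in the same accessor style:

	/* "untagged only": vlan_tag must equal 0 under an all-ones mask */
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.vlan_tag);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 0);

This is why the allow rule above, which only sets criteria bits, admits untagged (and, with spoofchk, correctly-sourced) traffic.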
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+				   struct mlx5_vport *vport)
+{
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	esw_vport_cleanup_egress_rules(esw, vport);
+
+	if (!vport->info.vlan && !vport->info.qos) {
+		esw_vport_disable_egress_acl(esw, vport);
+		return 0;
+	}
+
+	err = esw_vport_enable_egress_acl(esw, vport);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "failed to enable egress acl (%d) on vport[%d]\n",
+			       err, vport->vport);
+		return err;
+	}
+
+	esw_debug(esw->dev,
+		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+		  vport->vport, vport->info.vlan, vport->info.qos);
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		err = -ENOMEM;
+		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+			 vport->vport, err);
+		goto out;
+	}
+
+	/* Allowed vlan rule */
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+	vport->egress.allowed_vlan =
+		mlx5_add_flow_rules(vport->egress.acl, spec,
+				    &flow_act, NULL, 0);
+	if (IS_ERR(vport->egress.allowed_vlan)) {
+		err = PTR_ERR(vport->egress.allowed_vlan);
+		esw_warn(esw->dev,
+			 "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+			 vport->vport, err);
+		vport->egress.allowed_vlan = NULL;
+		goto out;
+	}
+
+	/* Drop others rule (star rule) */
+	memset(spec, 0, sizeof(*spec));
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+	vport->egress.drop_rule =
+		mlx5_add_flow_rules(vport->egress.acl, spec,
+				    &flow_act, NULL, 0);
+	if (IS_ERR(vport->egress.drop_rule)) {
+		err = PTR_ERR(vport->egress.drop_rule);
+		esw_warn(esw->dev,
+			 "vport[%d] configure egress drop rule failed, err(%d)\n",
+			 vport->vport, err);
+		vport->egress.drop_rule = NULL;
+	}
+out:
+	kvfree(spec);
+	return err;
+}
+
+/* Vport QoS management */
+static int esw_create_tsar(struct mlx5_eswitch *esw)
+{
+	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_core_dev *dev = esw->dev;
+	int err;
+
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+		return 0;
+
+	if (esw->qos.enabled)
+		return -EEXIST;
+
+	err = mlx5_create_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &tsar_ctx,
+						 &esw->qos.root_tsar_id);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
+		return err;
+	}
+
+	esw->qos.enabled = true;
+	return 0;
+}
+
+static void esw_destroy_tsar(struct mlx5_eswitch *esw)
+{
+	int err;
+
+	if (!esw->qos.enabled)
+		return;
+
+	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+						  SCHEDULING_HIERARCHY_E_SWITCH,
+						  esw->qos.root_tsar_id);
+	if (err)
+		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
+
+	esw->qos.enabled = false;
+}
+
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+				u32 initial_max_rate)
+{
+	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct mlx5_core_dev *dev = esw->dev;
+	void *vport_elem;
+	int err = 0;
+
+	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
+	    !MLX5_CAP_QOS(dev, esw_scheduling))
+		return 0;
+
+	if (vport->qos.enabled)
+		return -EEXIST;
+
+	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+				  element_attributes);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+		 esw->qos.root_tsar_id);
+	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+		 initial_max_rate);
+
+	err = mlx5_create_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &sched_ctx,
+						 &vport->qos.esw_tsar_ix);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+		return err;
+	}
+
+	vport->qos.enabled = true;
+	return 0;
+}
+
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	int err = 0;
+
+	if (!vport->qos.enabled)
+		return;
+
+	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+						  SCHEDULING_HIERARCHY_E_SWITCH,
+						  vport->qos.esw_tsar_ix);
+	if (err)
+		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+
+	vport->qos.enabled = false;
+}
+
+static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+				u32 max_rate)
+{
+	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct mlx5_core_dev *dev = esw->dev;
+	void *vport_elem;
+	u32 bitmask = 0;
+	int err = 0;
+
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+		return -EOPNOTSUPP;
+
+	if (!vport->qos.enabled)
+		return -EIO;
+
+	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+				  element_attributes);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+		 esw->qos.root_tsar_id);
+	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+		 max_rate);
+	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+	err = mlx5_modify_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &sched_ctx,
+						 vport->qos.esw_tsar_ix,
+						 bitmask);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+		return err;
+	}
+
+	return 0;
+}
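
Taken together, the QoS helpers above manage a two-level scheduling tree: one root TSAR per E-Switch and one vport element per enabled vport. A hedged sketch of the intended call order, where vport_num and new_rate are placeholders:

	esw_create_tsar(esw);                           /* root TSAR       */
	esw_vport_enable_qos(esw, vport_num, max_rate); /* leaf under root */
	esw_vport_qos_config(esw, vport_num, new_rate); /* update BW cap   */
	esw_vport_disable_qos(esw, vport_num);          /* remove leaf     */
	esw_destroy_tsar(esw);                          /* remove root     */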
+
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+	((u8 *)node_guid)[7] = mac[0];
+	((u8 *)node_guid)[6] = mac[1];
+	((u8 *)node_guid)[5] = mac[2];
+	((u8 *)node_guid)[4] = 0xff;
+	((u8 *)node_guid)[3] = 0xfe;
+	((u8 *)node_guid)[2] = mac[3];
+	((u8 *)node_guid)[1] = mac[4];
+	((u8 *)node_guid)[0] = mac[5];
+}
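
The byte shuffle above is the modified EUI-64 expansion, mac[0..2] : ff : fe : mac[3..5]. A worked example, assuming a little-endian host:

	u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u64 node_guid;

	node_guid_gen_from_mac(&node_guid, mac);
	/* node_guid == 0x001122fffe334455 */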
+
+static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
+				 struct mlx5_vport *vport)
+{
+	int vport_num = vport->vport;
+
+	if (!vport_num)
+		return;
+
+	mlx5_modify_vport_admin_state(esw->dev,
+				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+				      vport_num,
+				      vport->info.link_state);
+	mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, vport->info.mac);
+	mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, vport->info.node_guid);
+	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
+			       (vport->info.vlan || vport->info.qos));
+
+	/* Only legacy mode needs ACLs */
+	if (esw->mode == SRIOV_LEGACY) {
+		esw_vport_ingress_config(esw, vport);
+		esw_vport_egress_config(esw, vport);
+	}
+}
+
+static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
+			     int enable_events)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+
+	mutex_lock(&esw->state_lock);
+	WARN_ON(vport->enabled);
+
+	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+	/* Restore old vport configuration */
+	esw_apply_vport_conf(esw, vport);
+
+	/* Attach vport to the eswitch rate limiter */
+	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
+
+	/* Sync with current vport context */
+	vport->enabled_events = enable_events;
+	vport->enabled = true;
+
+	/* only PF is trusted by default */
+	if (!vport_num)
+		vport->info.trusted = true;
+
+	esw_vport_change_handle_locked(vport);
+
+	esw->enabled_vports++;
+	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
+	mutex_unlock(&esw->state_lock);
+}
+
+static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+
+	if (!vport->enabled)
+		return;
+
+	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
+	/* Mark this vport as disabled to discard new events */
+	vport->enabled = false;
+
+	synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
+	/* Wait for current already scheduled events to complete */
+	flush_workqueue(esw->work_queue);
+	/* Disable events from this vport */
+	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+	mutex_lock(&esw->state_lock);
+	/* We don't assume VFs will clean up after themselves.
+	 * Calling the vport change handler while the vport is disabled
+	 * cleans up the vport resources.
+	 */
+	esw_vport_change_handle_locked(vport);
+	vport->enabled_events = 0;
+	esw_vport_disable_qos(esw, vport_num);
+	if (vport_num && esw->mode == SRIOV_LEGACY) {
+		mlx5_modify_vport_admin_state(esw->dev,
+					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+					      vport_num,
+					      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
+		esw_vport_disable_egress_acl(esw, vport);
+		esw_vport_disable_ingress_acl(esw, vport);
+	}
+	esw->enabled_vports--;
+	mutex_unlock(&esw->state_lock);
+}
+
+/* Public E-Switch API */
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
+{
+	int err;
+	int i, enabled_events;
+
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return 0;
+
+	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+		return -ENOTSUPP;
+	}
+
+	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+		esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+
+	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+	esw->mode = mode;
+	esw_disable_vport(esw, 0);
+
+	if (mode == SRIOV_LEGACY)
+		err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+	else
+		err = esw_offloads_init(esw, nvfs + 1);
+	if (err)
+		goto abort;
+
+	err = esw_create_tsar(esw);
+	if (err)
+		esw_warn(esw->dev, "Failed to create eswitch TSAR");
+
+	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
+	for (i = 0; i <= nvfs; i++)
+		esw_enable_vport(esw, i, enabled_events);
+
+	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
+		 esw->enabled_vports);
+	return 0;
+
+abort:
+	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+	esw->mode = SRIOV_NONE;
+	return err;
+}
+
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
+{
+	struct esw_mc_addr *mc_promisc;
+	int nvports;
+	int i;
+
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
+	esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
+		 esw->enabled_vports, esw->mode);
+
+	mc_promisc = esw->mc_promisc;
+	nvports = esw->enabled_vports;
+
+	for (i = 0; i < esw->total_vports; i++)
+		esw_disable_vport(esw, i);
+
+	if (mc_promisc && mc_promisc->uplink_rule)
+		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+	esw_destroy_tsar(esw);
+
+	if (esw->mode == SRIOV_LEGACY)
+		esw_destroy_legacy_fdb_table(esw);
+	else if (esw->mode == SRIOV_OFFLOADS)
+		esw_offloads_cleanup(esw, nvports);
+
+	esw->mode = SRIOV_NONE;
+	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
+	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+}
+
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw)
+{
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
+	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+	/* VF Vports will be enabled when SRIOV is enabled */
+}
+
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw)
+{
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
+	esw_disable_vport(esw, 0);
+}
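
For orientation, a hedged sketch of the expected lifecycle around these public entry points (the actual call sites live in the core driver and sriov code, not shown in this patch hunk):

	struct mlx5_eswitch *esw;
	int err;

	err = mlx5_eswitch_init(dev);                /* device probe      */
	esw = dev->priv.eswitch;
	mlx5_eswitch_attach(esw);                    /* load: PF vport up */
	err = mlx5_eswitch_enable_sriov(esw, nvfs, SRIOV_LEGACY);
	/* ... */
	mlx5_eswitch_disable_sriov(esw);
	mlx5_eswitch_detach(esw);                    /* unload            */
	mlx5_eswitch_cleanup(esw);                   /* device remove     */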
+
+int mlx5_eswitch_init(struct mlx5_core_dev *dev)
+{
+	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
+	int total_vports = MLX5_TOTAL_VPORTS(dev);
+	struct esw_mc_addr *mc_promisc;
+	struct mlx5_eswitch *esw;
+	int vport_num;
+	int err;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return 0;
+
+	esw_info(dev,
+		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
+		 total_vports, l2_table_size,
+		 MLX5_MAX_UC_PER_VPORT(dev),
+		 MLX5_MAX_MC_PER_VPORT(dev));
+
+	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
+	if (!esw)
+		return -ENOMEM;
+
+	esw->dev = dev;
+
+	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
+				       sizeof(unsigned long), GFP_KERNEL);
+	if (!esw->l2_table.bitmap) {
+		err = -ENOMEM;
+		goto abort;
+	}
+	esw->l2_table.size = l2_table_size;
+
+	mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+	if (!mc_promisc) {
+		err = -ENOMEM;
+		goto abort;
+	}
+	esw->mc_promisc = mc_promisc;
+
+	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
+	if (!esw->work_queue) {
+		err = -ENOMEM;
+		goto abort;
+	}
+
+	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
+			      GFP_KERNEL);
+	if (!esw->vports) {
+		err = -ENOMEM;
+		goto abort;
+	}
+
+	esw->offloads.vport_reps =
+		kcalloc(total_vports, sizeof(struct mlx5_eswitch_rep),
+			GFP_KERNEL);
+	if (!esw->offloads.vport_reps) {
+		err = -ENOMEM;
+		goto abort;
+	}
+
+	hash_init(esw->offloads.encap_tbl);
+	mutex_init(&esw->state_lock);
+
+	for (vport_num = 0; vport_num < total_vports; vport_num++) {
+		struct mlx5_vport *vport = &esw->vports[vport_num];
+
+		vport->vport = vport_num;
+		vport->info.link_state = MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
+		vport->dev = dev;
+		INIT_WORK(&vport->vport_change_handler,
+			  esw_vport_change_handler);
+	}
+
+	esw->total_vports = total_vports;
+	esw->enabled_vports = 0;
+	esw->mode = SRIOV_NONE;
+	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+
+	dev->priv.eswitch = esw;
+	return 0;
+abort:
+	if (esw->work_queue)
+		destroy_workqueue(esw->work_queue);
+	kfree(esw->l2_table.bitmap);
+	kfree(esw->vports);
+	kfree(esw->offloads.vport_reps);
+	kfree(esw);
+	return err;
+}
+
+void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
+{
+	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
+	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+		return;
+
+	esw_info(esw->dev, "cleanup\n");
+
+	esw->dev->priv.eswitch = NULL;
+	destroy_workqueue(esw->work_queue);
+	kfree(esw->l2_table.bitmap);
+	kfree(esw->mc_promisc);
+	kfree(esw->offloads.vport_reps);
+	kfree(esw->vports);
+	kfree(esw);
+}
+
+void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
+{
+	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
+	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
+	struct mlx5_vport *vport;
+
+	if (!esw) {
+		pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n",
+			vport_num);
+		return;
+	}
+
+	vport = &esw->vports[vport_num];
+	if (vport->enabled)
+		queue_work(esw->work_queue, &vport->vport_change_handler);
+}
+
+/* Vport Administration */
+#define ESW_ALLOWED(esw) \
+	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
+#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+
+int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+			       int vport, u8 mac[ETH_ALEN])
+{
+	struct mlx5_vport *evport;
+	u64 node_guid;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+
+	if (evport->info.spoofchk && !is_valid_ether_addr(mac)) {
+		mlx5_core_warn(esw->dev,
+			       "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+			       vport);
+		err = -EPERM;
+		goto unlock;
+	}
+
+	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
+			       vport, err);
+		goto unlock;
+	}
+
+	node_guid_gen_from_mac(&node_guid, mac);
+	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+	if (err)
+		mlx5_core_warn(esw->dev,
+			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+			       vport, err);
+
+	ether_addr_copy(evport->info.mac, mac);
+	evport->info.node_guid = node_guid;
+	if (evport->enabled && esw->mode == SRIOV_LEGACY)
+		err = esw_vport_ingress_config(esw, evport);
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+				 int vport, int link_state)
+{
+	struct mlx5_vport *evport;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+
+	err = mlx5_modify_vport_admin_state(esw->dev,
+					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
+					    vport, link_state);
+	if (err) {
+		mlx5_core_warn(esw->dev,
+			       "Failed to set vport %d link state, err = %d\n",
+			       vport, err);
+		goto unlock;
+	}
+
+	evport->info.link_state = link_state;
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
+				  int vport, struct ifla_vf_info *ivi)
+{
+	struct mlx5_vport *evport;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	evport = &esw->vports[vport];
+
+	memset(ivi, 0, sizeof(*ivi));
+	ivi->vf = vport - 1;
+
+	mutex_lock(&esw->state_lock);
+	ether_addr_copy(ivi->mac, evport->info.mac);
+	ivi->linkstate = evport->info.link_state;
+	ivi->vlan = evport->info.vlan;
+	ivi->qos = evport->info.qos;
+	ivi->spoofchk = evport->info.spoofchk;
+	ivi->trusted = evport->info.trusted;
+	ivi->max_tx_rate = evport->info.max_rate;
+	mutex_unlock(&esw->state_lock);
+
+	return 0;
+}
+
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				  int vport, u16 vlan, u8 qos, u8 set_flags)
+{
+	struct mlx5_vport *evport;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+
+	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+	if (err)
+		goto unlock;
+
+	evport->info.vlan = vlan;
+	evport->info.qos = qos;
+	if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+		err = esw_vport_ingress_config(esw, evport);
+		if (err)
+			goto unlock;
+		err = esw_vport_egress_config(esw, evport);
+	}
+
+unlock:
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				int vport, u16 vlan, u8 qos)
+{
+	u8 set_flags = 0;
+
+	if (vlan || qos)
+		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
+
+	return __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
+}
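
A hedged usage sketch of the VST helper above, with illustrative values: any non-zero vlan/qos enables both strip and insert, while 0/0 clears them back to VGT:

	/* VST: tag VF traffic with VLAN 100, priority 3 */
	err = mlx5_eswitch_set_vport_vlan(esw, vport, 100, 3);

	/* back to VGT: set_flags == 0 clears strip and insert */
	err = mlx5_eswitch_set_vport_vlan(esw, vport, 0, 0);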
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+				    int vport, bool spoofchk)
+{
+	struct mlx5_vport *evport;
+	bool pschk;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+	pschk = evport->info.spoofchk;
+	evport->info.spoofchk = spoofchk;
+	if (evport->enabled && esw->mode == SRIOV_LEGACY)
+		err = esw_vport_ingress_config(esw, evport);
+	if (err)
+		evport->info.spoofchk = pschk;
+	mutex_unlock(&esw->state_lock);
+
+	return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+				 int vport, bool setting)
+{
+	struct mlx5_vport *evport;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+	evport->info.trusted = setting;
+	if (evport->enabled)
+		esw_vport_change_handle_locked(evport);
+	mutex_unlock(&esw->state_lock);
+
+	return 0;
+}
+
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+				int vport, u32 max_rate)
+{
+	struct mlx5_vport *evport;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+	err = esw_vport_qos_config(esw, vport, max_rate);
+	if (!err)
+		evport->info.max_rate = max_rate;
+
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
+int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
+				 int vport,
+				 struct ifla_vf_stats *vf_stats)
+{
+	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
+	int err = 0;
+	u32 *out;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	out = mlx5_vzalloc(outlen);
+	if (!out)
+		return -ENOMEM;
+
+	MLX5_SET(query_vport_counter_in, in, opcode,
+		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+	MLX5_SET(query_vport_counter_in, in, vport_number, vport);
+	if (vport)
+		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
+
+	err = mlx5_cmd_exec(esw->dev, in, sizeof(in), out, outlen);
+	if (err)
+		goto free_out;
+
+	#define MLX5_GET_CTR(p, x) \
+		MLX5_GET64(query_vport_counter_out, p, x)
+
+	memset(vf_stats, 0, sizeof(*vf_stats));
+	vf_stats->rx_packets =
+		MLX5_GET_CTR(out, received_eth_unicast.packets) +
+		MLX5_GET_CTR(out, received_eth_multicast.packets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+	vf_stats->rx_bytes =
+		MLX5_GET_CTR(out, received_eth_unicast.octets) +
+		MLX5_GET_CTR(out, received_eth_multicast.octets) +
+		MLX5_GET_CTR(out, received_eth_broadcast.octets);
+
+	vf_stats->tx_packets =
+		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+
+	vf_stats->tx_bytes =
+		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+	vf_stats->multicast =
+		MLX5_GET_CTR(out, received_eth_multicast.packets);
+
+	vf_stats->broadcast =
+		MLX5_GET_CTR(out, received_eth_broadcast.packets);
+
+free_out:
+	kvfree(out);
+	return err;
+}
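
A hedged caller sketch for the stats query above, e.g. from an ndo_get_vf_stats implementation; vf_index is a placeholder, and VF n maps to vport n + 1 (vport 0 is the PF, per the ivi->vf assignment earlier):

	struct ifla_vf_stats stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, vf_index + 1, &stats);
	if (!err)
		pr_debug("vf%d: rx %llu pkts / %llu bytes\n",
			 vf_index, stats.rx_packets, stats.rx_bytes);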
diff --git a/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.h
new file mode 100644
index 0000000..9840fd1
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch.h
@@ -0,0 +1,340 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies, Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_ESWITCH_H__
+#define __MLX5_ESWITCH_H__
+
+#include <linux/if_ether.h>
+#include <linux/if_link.h>
+#include <net/devlink.h>
+#include "lib/device.h"
+
+#define MLX5_MAX_UC_PER_VPORT(dev) \
+	(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
+
+#define MLX5_MAX_MC_PER_VPORT(dev) \
+	(1 << MLX5_CAP_GEN(dev, log_max_current_mc_list))
+
+#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
+#define MLX5_L2_ADDR_HASH(addr) (addr[5])
+
+#define FDB_UPLINK_VPORT 0xffff
+
+/* L2 -mac address based- hash helpers */
+struct l2addr_node {
+	struct hlist_node hlist;
+	u8                addr[ETH_ALEN];
+};
+
+#define for_each_l2hash_node(hn, tmp, hash, i) \
+	for (i = 0; i < MLX5_L2_ADDR_HASH_SIZE; i++) \
+		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
+
+#define l2addr_hash_find(hash, mac, type) ({                \
+	int ix = MLX5_L2_ADDR_HASH(mac);                    \
+	bool found = false;                                 \
+	type *ptr = NULL;                                   \
+							    \
+	hlist_for_each_entry(ptr, &hash[ix], node.hlist)    \
+		if (ether_addr_equal(ptr->node.addr, mac)) {\
+			found = true;                       \
+			break;                              \
+		}                                           \
+	if (!found)                                         \
+		ptr = NULL;                                 \
+	ptr;                                                \
+})
+
+#define l2addr_hash_add(hash, mac, type, gfp) ({            \
+	int ix = MLX5_L2_ADDR_HASH(mac);                    \
+	type *ptr = NULL;                                   \
+							    \
+	ptr = kzalloc(sizeof(type), gfp);                   \
+	if (ptr) {                                          \
+		ether_addr_copy(ptr->node.addr, mac);       \
+		hlist_add_head(&ptr->node.hlist, &hash[ix]);\
+	}                                                   \
+	ptr;                                                \
+})
+
+#define l2addr_hash_del(ptr) ({                             \
+	hlist_del(&ptr->node.hlist);                        \
+	kfree(ptr);                                         \
+})
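
A minimal usage sketch of the three hash helpers above. The container type is hypothetical; any struct embedding a struct l2addr_node member named 'node' works, since the macros dereference ptr->node:

	struct my_l2addr {                     /* hypothetical type */
		struct l2addr_node node;       /* must be named 'node' */
		bool               mc_promisc;
	};
	struct hlist_head hash[MLX5_L2_ADDR_HASH_SIZE];
	u8 mac[ETH_ALEN] = { 0 };
	struct my_l2addr *addr;

	addr = l2addr_hash_add(hash, mac, struct my_l2addr, GFP_KERNEL);
	addr = l2addr_hash_find(hash, mac, struct my_l2addr);
	if (addr)
		l2addr_hash_del(addr);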
+
+struct vport_ingress {
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+	struct mlx5_flow_group *allow_spoofchk_only_grp;
+	struct mlx5_flow_group *allow_untagged_only_grp;
+	struct mlx5_flow_group *drop_grp;
+	struct mlx5_flow_handle  *allow_rule;
+	struct mlx5_flow_handle  *drop_rule;
+};
+
+struct vport_egress {
+	struct mlx5_flow_table *acl;
+	struct mlx5_flow_group *allowed_vlans_grp;
+	struct mlx5_flow_group *drop_grp;
+	struct mlx5_flow_handle  *allowed_vlan;
+	struct mlx5_flow_handle  *drop_rule;
+};
+
+struct mlx5_vport_info {
+	u8                      mac[ETH_ALEN];
+	u16                     vlan;
+	u8                      qos;
+	u64                     node_guid;
+	int                     link_state;
+	u32                     max_rate;
+	bool                    spoofchk;
+	bool                    trusted;
+};
+
+struct mlx5_vport {
+	struct mlx5_core_dev    *dev;
+	int                     vport;
+	struct hlist_head       uc_list[MLX5_L2_ADDR_HASH_SIZE];
+	struct hlist_head       mc_list[MLX5_L2_ADDR_HASH_SIZE];
+	struct mlx5_flow_handle *promisc_rule;
+	struct mlx5_flow_handle *allmulti_rule;
+	struct work_struct      vport_change_handler;
+
+	struct vport_ingress    ingress;
+	struct vport_egress     egress;
+
+	struct mlx5_vport_info  info;
+
+	struct {
+		bool            enabled;
+		u32             esw_tsar_ix;
+	} qos;
+
+	bool                    enabled;
+	u16                     enabled_events;
+};
+
+struct mlx5_l2_table {
+	struct hlist_head l2_hash[MLX5_L2_ADDR_HASH_SIZE];
+	u32                  size;
+	unsigned long        *bitmap;
+};
+
+struct mlx5_eswitch_fdb {
+	void *fdb;
+	union {
+		struct legacy_fdb {
+			struct mlx5_flow_group *addr_grp;
+			struct mlx5_flow_group *allmulti_grp;
+			struct mlx5_flow_group *promisc_grp;
+		} legacy;
+
+		struct offloads_fdb {
+			struct mlx5_flow_table *fdb;
+			struct mlx5_flow_group *send_to_vport_grp;
+			struct mlx5_flow_group *miss_grp;
+			struct mlx5_flow_handle *miss_rule;
+			int vlan_push_pop_refcount;
+		} offloads;
+	};
+};
+
+enum {
+	SRIOV_NONE,
+	SRIOV_LEGACY,
+	SRIOV_OFFLOADS
+};
+
+struct mlx5_esw_sq {
+	struct mlx5_flow_handle	*send_to_vport_rule;
+	struct list_head	 list;
+};
+
+struct mlx5_eswitch_rep {
+	int		       (*load)(struct mlx5_eswitch *esw,
+				       struct mlx5_eswitch_rep *rep);
+	void		       (*unload)(struct mlx5_eswitch *esw,
+					 struct mlx5_eswitch_rep *rep);
+	u16		       vport;
+	u8		       hw_id[ETH_ALEN];
+	struct net_device      *netdev;
+
+	struct mlx5_flow_handle *vport_rx_rule;
+	struct list_head       vport_sqs_list;
+	u16		       vlan;
+	u32		       vlan_refcount;
+	bool		       valid;
+};
+
+struct mlx5_esw_offload {
+	struct mlx5_flow_table *ft_offloads;
+	struct mlx5_flow_group *vport_rx_group;
+	struct mlx5_eswitch_rep *vport_reps;
+	DECLARE_HASHTABLE(encap_tbl, 8);
+	u8 inline_mode;
+};
+
+struct mlx5_eswitch {
+	struct mlx5_core_dev    *dev;
+	struct mlx5_l2_table    l2_table;
+	struct mlx5_eswitch_fdb fdb_table;
+	struct hlist_head       mc_table[MLX5_L2_ADDR_HASH_SIZE];
+	struct workqueue_struct *work_queue;
+	struct mlx5_vport       *vports;
+	int                     total_vports;
+	int                     enabled_vports;
+	/* Synchronize between vport change events
+	 * and async SRIOV admin state changes
+	 */
+	struct mutex            state_lock;
+	struct esw_mc_addr      *mc_promisc;
+
+	struct {
+		bool            enabled;
+		u32             root_tsar_id;
+	} qos;
+
+	struct mlx5_esw_offload offloads;
+	int                     mode;
+};
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+
+/* E-Switch API */
+int mlx5_eswitch_init(struct mlx5_core_dev *dev);
+void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
+void mlx5_eswitch_attach(struct mlx5_eswitch *esw);
+void mlx5_eswitch_detach(struct mlx5_eswitch *esw);
+void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
+void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
+int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+			       int vport, u8 mac[ETH_ALEN]);
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
+				 int vport, int link_state);
+int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+				    int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+				 int vport_num, bool setting);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+				int vport, u32 max_rate);
+int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
+				  int vport, struct ifla_vf_info *ivi);
+int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
+				 int vport,
+				 struct ifla_vf_stats *vf_stats);
+
+struct mlx5_flow_spec;
+struct mlx5_esw_flow_attr;
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_spec *spec,
+				struct mlx5_esw_flow_attr *attr);
+struct mlx5_flow_handle *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+
+enum {
+	SET_VLAN_STRIP	= BIT(0),
+	SET_VLAN_INSERT	= BIT(1)
+};
+
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP  0x40
+#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
+
+struct mlx5_encap_info {
+	__be32 daddr;
+	__be32 tun_id;
+	__be16 tp_dst;
+};
+
+struct mlx5_encap_entry {
+	struct hlist_node encap_hlist;
+	struct list_head flows;
+	u32 encap_id;
+	struct neighbour *n;
+	struct mlx5_encap_info tun_info;
+	unsigned char h_dest[ETH_ALEN];	/* destination eth addr	*/
+
+	struct net_device *out_dev;
+	int tunnel_type;
+};
+
+struct mlx5_esw_flow_attr {
+	struct mlx5_eswitch_rep *in_rep;
+	struct mlx5_eswitch_rep *out_rep;
+
+	int	action;
+	u16	vlan;
+	bool	vlan_handled;
+	struct mlx5_encap_entry *encap;
+};
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num);
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep);
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+				     int vport_index,
+				     struct mlx5_eswitch_rep *rep);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+				       int vport_index);
+struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr);
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr);
+int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
+				  int vport, u16 vlan, u8 qos, u8 set_flags);
+
+#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+
+#define esw_info(dev, format, ...)				\
+	pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_warn(dev, format, ...)				\
+	pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_debug(dev, format, ...)				\
+	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
+#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch_offloads.c
new file mode 100644
index 0000000..d09b2f8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/sriov/eswitch_offloads.c
@@ -0,0 +1,1002 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "lib/driver.h"
+#include "lib/mlx5_ifc.h"
+#include "lib/vport.h"
+#include "lib/fs.h"
+#include "mlx5.h"
+#include "sriov/eswitch.h"
+
+enum {
+	FDB_FAST_PATH = 0,
+	FDB_SLOW_PATH
+};
+
+struct mlx5_flow_handle *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+				struct mlx5_flow_spec *spec,
+				struct mlx5_esw_flow_attr *attr)
+{
+	struct mlx5_flow_destination dest[2] = {};
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_fc *counter = NULL;
+	struct mlx5_flow_handle *rule;
+	void *misc;
+	int i = 0;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	/* Per-flow vlan push/pop is emulated; do not pass these action
+	 * flags to the firmware.
+	 */
+	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+					   MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+
+	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+		dest[i].vport_num = attr->out_rep->vport;
+		i++;
+	}
+	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		counter = mlx5_fc_create(esw->dev, true);
+		if (IS_ERR(counter))
+			return ERR_CAST(counter);
+		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+		dest[i].counter = counter;
+		i++;
+	}
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+				      MLX5_MATCH_MISC_PARAMETERS;
+	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
+
+	if (attr->encap)
+		flow_act.encap_id = attr->encap->encap_id;
+
+	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
+				   spec, &flow_act, dest, i);
+	if (IS_ERR(rule))
+		mlx5_fc_destroy(esw->dev, counter);
+
+	return rule;
+}
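
A hedged caller sketch for the offloads fast path above; the attribute values are illustrative, and in practice the TC offload code fills them from the parsed rule:

	struct mlx5_esw_flow_attr attr = {
		.in_rep  = in_rep,
		.out_rep = out_rep,
		.action  = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			   MLX5_FLOW_CONTEXT_ACTION_COUNT,
	};
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, &attr);
	if (IS_ERR(rule))
		return PTR_ERR(rule);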
+
+static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vf_vport, err = 0;
+
+	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
+	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
+		rep = &esw->offloads.vport_reps[vf_vport];
+		if (!rep->valid)
+			continue;
+
+		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
+		if (err)
+			goto out;
+	}
+
+out:
+	return err;
+}
+
+static struct mlx5_eswitch_rep *
+esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
+{
+	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
+
+	in_rep  = attr->in_rep;
+	out_rep = attr->out_rep;
+
+	if (push)
+		vport = in_rep;
+	else if (pop)
+		vport = out_rep;
+	else
+		vport = in_rep;
+
+	return vport;
+}
+
+static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
+				     bool push, bool pop, bool fwd)
+{
+	struct mlx5_eswitch_rep *in_rep, *out_rep;
+
+	if ((push || pop) && !fwd)
+		goto out_notsupp;
+
+	in_rep  = attr->in_rep;
+	out_rep = attr->out_rep;
+
+	if (push && in_rep->vport == FDB_UPLINK_VPORT)
+		goto out_notsupp;
+
+	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
+		goto out_notsupp;
+
+	/* vport has vlan push configured; can't offload VF --> wire
+	 * rules without it
+	 */
+	if (!push && !pop && fwd)
+		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
+			goto out_notsupp;
+
+	/* protects against (1) setting rules with different vlans to push
+	 * and (2) setting rules without vlans (attr->vlan = 0) together
+	 * with vlans to push (!= 0)
+	 */
+	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
+		goto out_notsupp;
+
+	return 0;
+
+out_notsupp:
+	return -ENOTSUPP;
+}
+
+int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr)
+{
+	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+	struct mlx5_eswitch_rep *vport = NULL;
+	bool push, pop, fwd;
+	int err = 0;
+
+	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+	err = esw_add_vlan_action_check(attr, push, pop, fwd);
+	if (err)
+		return err;
+
+	attr->vlan_handled = false;
+
+	vport = esw_vlan_action_get_vport(attr, push, pop);
+
+	if (!push && !pop && fwd) {
+		/* tracks VF --> wire rules without vlan push action */
+		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
+			vport->vlan_refcount++;
+			attr->vlan_handled = true;
+		}
+
+		return 0;
+	}
+
+	if (!push && !pop)
+		return 0;
+
+	if (!(offloads->vlan_push_pop_refcount)) {
+		/* it's the 1st vlan rule, apply global vlan pop policy */
+		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
+		if (err)
+			goto out;
+	}
+	offloads->vlan_push_pop_refcount++;
+
+	if (push) {
+		if (vport->vlan_refcount)
+			goto skip_set_push;
+
+		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
+						    SET_VLAN_INSERT | SET_VLAN_STRIP);
+		if (err)
+			goto out;
+		vport->vlan = attr->vlan;
+skip_set_push:
+		vport->vlan_refcount++;
+	}
+out:
+	if (!err)
+		attr->vlan_handled = true;
+	return err;
+}
+
+int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
+				 struct mlx5_esw_flow_attr *attr)
+{
+	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+	struct mlx5_eswitch_rep *vport = NULL;
+	bool push, pop, fwd;
+	int err = 0;
+
+	if (!attr->vlan_handled)
+		return 0;
+
+	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
+	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
+	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+
+	vport = esw_vlan_action_get_vport(attr, push, pop);
+
+	if (!push && !pop && fwd) {
+		/* tracks VF --> wire rules without vlan push action */
+		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
+			vport->vlan_refcount--;
+
+		return 0;
+	}
+
+	if (push) {
+		vport->vlan_refcount--;
+		if (vport->vlan_refcount)
+			goto skip_unset_push;
+
+		vport->vlan = 0;
+		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
+						    0, 0, SET_VLAN_STRIP);
+		if (err)
+			goto out;
+	}
+
+skip_unset_push:
+	offloads->vlan_push_pop_refcount--;
+	if (offloads->vlan_push_pop_refcount)
+		return 0;
+
+	/* no more vlan rules, stop global vlan pop policy */
+	err = esw_set_global_vlan_pop(esw, 0);
+
+out:
+	return err;
+}
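
The add/del pair above is reference-counted and must bracket the offloaded rule's lifetime. A hedged sketch of the pairing:

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return err;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule)) {
		mlx5_eswitch_del_vlan_action(esw, attr);
		return PTR_ERR(rule);
	}
	/* ... and call mlx5_eswitch_del_vlan_action() again when the
	 * rule is eventually deleted.
	 */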
+
+static struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
+{
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5_flow_spec *spec;
+	void *misc;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+		flow_rule = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
+	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport_num = vport;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule))
+		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+out:
+	kvfree(spec);
+	return flow_rule;
+}
+
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5_esw_sq *esw_sq, *tmp;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
+		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
+		list_del(&esw_sq->list);
+		kfree(esw_sq);
+	}
+}
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5_esw_sq *esw_sq;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	for (i = 0; i < sqns_num; i++) {
+		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
+		if (!esw_sq) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		/* Add re-inject rule to the PF/representor sqs */
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+								rep->vport,
+								sqns_array[i]);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
+			kfree(esw_sq);
+			goto out_err;
+		}
+		esw_sq->send_to_vport_rule = flow_rule;
+		list_add(&esw_sq->list, &rep->vport_sqs_list);
+	}
+	return 0;
+
+out_err:
+	mlx5_eswitch_sqs2vport_stop(esw, rep);
+	return err;
+}
+
+static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_handle *flow_rule = NULL;
+	struct mlx5_flow_spec *spec;
+	int err = 0;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+	dest.vport_num = 0;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+					&flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		err = PTR_ERR(flow_rule);
+		esw_warn(esw->dev,  "FDB: Failed to add miss flow rule err %d\n", err);
+		goto out;
+	}
+
+	esw->fdb_table.offloads.miss_rule = flow_rule;
+out:
+	kvfree(spec);
+	return err;
+}
+
+#define MAX_PF_SQ 256
+#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
+#define ESW_OFFLOADS_NUM_GROUPS  4
+
+static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *fdb = NULL;
+	struct mlx5_flow_group *g;
+	u32 *flow_group_in;
+	void *match_criteria;
+	int table_size, ix, err = 0;
+	u32 flags = 0;
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -ENOMEM;
+		goto ns_err;
+	}
+
+	esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
+		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
+
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+						  ESW_OFFLOADS_NUM_ENTRIES,
+						  ESW_OFFLOADS_NUM_GROUPS, 0,
+						  flags);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
+		goto fast_fdb_err;
+	}
+	esw->fdb_table.fdb = fdb;
+
+	table_size = nvports + MAX_PF_SQ + 1;
+	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
+		goto slow_fdb_err;
+	}
+	esw->fdb_table.offloads.fdb = fdb;
+
+	/* create send-to-vport group */
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_MISC_PARAMETERS);
+
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+
+	ix = nvports + MAX_PF_SQ;
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
+
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+		goto send_vport_err;
+	}
+	esw->fdb_table.offloads.send_to_vport_grp = g;
+
+	/* create miss group */
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+
+	g = mlx5_create_flow_group(fdb, flow_group_in);
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
+		goto miss_err;
+	}
+	esw->fdb_table.offloads.miss_grp = g;
+
+	err = esw_add_fdb_miss_rule(esw);
+	if (err)
+		goto miss_rule_err;
+
+	return 0;
+
+miss_rule_err:
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+send_vport_err:
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+slow_fdb_err:
+	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+fast_fdb_err:
+ns_err:
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+{
+	if (!esw->fdb_table.fdb)
+		return;
+
+	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+
+	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
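+/* The offloads table on the NIC RX side holds the per-vport rx rules;
+ * it is sized for all VFs plus two additional vports.
+ */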
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
+{
+	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_table *ft_offloads;
+	struct mlx5_core_dev *dev = esw->dev;
+	int err = 0;
+
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+	if (!ns) {
+		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+		return -ENOMEM;
+	}
+
+	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
+	if (IS_ERR(ft_offloads)) {
+		err = PTR_ERR(ft_offloads);
+		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
+		return err;
+	}
+
+	esw->offloads.ft_offloads = ft_offloads;
+	return 0;
+}
+
+static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
+{
+	struct mlx5_esw_offload *offloads = &esw->offloads;
+
+	mlx5_destroy_flow_table(offloads->ft_offloads);
+}
+
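+/* A single flow group covers all vport rx rules; matching is done on
+ * the misc source_port field only.
+ */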
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *g;
+	struct mlx5_priv *priv = &esw->dev->priv;
+	u32 *flow_group_in;
+	void *match_criteria, *misc;
+	int err = 0;
+	int nvports = priv->sriov.num_vfs + 2;
+
+	flow_group_in = mlx5_vzalloc(inlen);
+	if (!flow_group_in)
+		return -ENOMEM;
+
+	/* create vport rx group */
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+		 MLX5_MATCH_MISC_PARAMETERS);
+
+	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
+
+	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+	if (IS_ERR(g)) {
+		err = PTR_ERR(g);
+		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
+		goto out;
+	}
+
+	esw->offloads.vport_rx_group = g;
+out:
+	kvfree(flow_group_in);
+	return err;
+}
+
+static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
+{
+	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
+}
+
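+/* Match traffic arriving from the given source vport and forward it to
+ * the supplied TIR.
+ */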
+struct mlx5_flow_handle *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+{
+	struct mlx5_flow_act flow_act = {0};
+	struct mlx5_flow_destination dest;
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5_flow_spec *spec;
+	void *misc;
+
+	spec = mlx5_vzalloc(sizeof(*spec));
+	if (!spec) {
+		esw_warn(esw->dev, "Failed to alloc match parameters\n");
+		flow_rule = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+	MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+	dest.tir_num = tirn;
+
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+				       &flow_act, &dest, 1);
+	if (IS_ERR(flow_rule)) {
+		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+		goto out;
+	}
+
+out:
+	kvfree(spec);
+	return flow_rule;
+}
+
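+/* Move from legacy to offloads mode by re-creating the SRIOV eswitch
+ * state in SRIOV_OFFLOADS mode; on failure, try to restore legacy mode.
+ */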
+static int esw_offloads_start(struct mlx5_eswitch *esw)
+{
+	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+	if (esw->mode != SRIOV_LEGACY) {
+		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+		return -EINVAL;
+	}
+
+	mlx5_eswitch_disable_sriov(esw);
+	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+	if (err) {
+		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+		if (err1)
+			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+	}
+	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
+		if (mlx5_eswitch_inline_mode_get(esw,
+						 num_vfs,
+						 &esw->offloads.inline_mode)) {
+			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
+			esw_warn(esw->dev, "Inline mode is different between vports\n");
+		}
+	}
+	return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+	int err;
+
+	err = esw_create_offloads_fdb_table(esw, nvports);
+	if (err)
+		return err;
+
+	err = esw_create_offloads_table(esw);
+	if (err)
+		goto create_ft_err;
+
+	err = esw_create_vport_rx_group(esw);
+	if (err)
+		goto create_fg_err;
+
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->valid)
+			continue;
+
+		err = rep->load(esw, rep);
+		if (err)
+			goto err_reps;
+	}
+	return 0;
+
+err_reps:
+	for (vport--; vport >= 0; vport--) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->valid)
+			continue;
+		rep->unload(esw, rep);
+	}
+	esw_destroy_vport_rx_group(esw);
+
+create_fg_err:
+	esw_destroy_offloads_table(esw);
+
+create_ft_err:
+	esw_destroy_offloads_fdb_table(esw);
+	return err;
+}
+
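+/* Move back from offloads to legacy mode; on failure, try to restore
+ * offloads mode.
+ */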
+static int esw_offloads_stop(struct mlx5_eswitch *esw)
+{
+	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+	mlx5_eswitch_disable_sriov(esw);
+	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+	if (err) {
+		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+		if (err1)
+			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+	}
+
+	return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->valid)
+			continue;
+		rep->unload(esw, rep);
+	}
+
+	esw_destroy_vport_rx_group(esw);
+	esw_destroy_offloads_table(esw);
+	esw_destroy_offloads_fdb_table(esw);
+}
+
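+/* Helpers translating between the devlink eswitch constants and their
+ * mlx5 counterparts.
+ */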
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_MODE_LEGACY:
+		*mlx5_mode = SRIOV_LEGACY;
+		break;
+	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+		*mlx5_mode = SRIOV_OFFLOADS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+	switch (mlx5_mode) {
+	case SRIOV_LEGACY:
+		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
+		break;
+	case SRIOV_OFFLOADS:
+		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
+		*mlx5_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
+		*mlx5_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
+		*mlx5_mode = MLX5_INLINE_MODE_IP;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
+		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+{
+	switch (mlx5_mode) {
+	case MLX5_INLINE_MODE_NONE:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
+		break;
+	case MLX5_INLINE_MODE_L2:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
+		break;
+	case MLX5_INLINE_MODE_IP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
+		break;
+	case MLX5_INLINE_MODE_TCP_UDP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
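+/* devlink eswitch mode/inline-mode callbacks, driven by user space via
+ * "devlink dev eswitch set/show".
+ */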
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+	struct mlx5_core_dev *dev;
+	u16 cur_mlx5_mode, mlx5_mode = 0;
+
+	dev = devlink_priv(devlink);
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	cur_mlx5_mode = dev->priv.eswitch->mode;
+
+	if (cur_mlx5_mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (esw_mode_from_devlink(mode, &mlx5_mode))
+		return -EINVAL;
+
+	if (cur_mlx5_mode == mlx5_mode)
+		return 0;
+
+	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+		return esw_offloads_start(dev->priv.eswitch);
+	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+		return esw_offloads_stop(dev->priv.eswitch);
+	else
+		return -EINVAL;
+}
+
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+	struct mlx5_core_dev *dev;
+
+	dev = devlink_priv(devlink);
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (dev->priv.eswitch->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+}
+
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int num_vports = esw->enabled_vports;
+	int err;
+	int vport;
+	u8 mlx5_mode;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
+	if (err)
+		goto out;
+
+	for (vport = 1; vport < num_vports; vport++) {
+		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
+		if (err) {
+			esw_warn(dev, "Failed to set min inline on vport %d\n",
+				 vport);
+			goto revert_inline_mode;
+		}
+	}
+
+	esw->offloads.inline_mode = mlx5_mode;
+	return 0;
+
+revert_inline_mode:
+	while (--vport > 0)
+		mlx5_modify_nic_vport_min_inline(dev,
+						 vport,
+						 esw->offloads.inline_mode);
+out:
+	return err;
+}
+
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+}
+
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	for (vport = 1; vport <= nvfs; vport++) {
+		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+			return -EINVAL;
+		prev_mlx5_mode = mlx5_mode;
+	}
+
+	*mode = mlx5_mode;
+	return 0;
+}
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+				     int vport_index,
+				     struct mlx5_eswitch_rep *__rep)
+{
+	struct mlx5_esw_offload *offloads = &esw->offloads;
+	struct mlx5_eswitch_rep *rep;
+
+	rep = &offloads->vport_reps[vport_index];
+
+	memset(rep, 0, sizeof(*rep));
+
+	rep->load   = __rep->load;
+	rep->unload = __rep->unload;
+	rep->vport  = __rep->vport;
+	rep->netdev = __rep->netdev;
+	ether_addr_copy(rep->hw_id, __rep->hw_id);
+
+	INIT_LIST_HEAD(&rep->vport_sqs_list);
+	rep->valid = true;
+}
+
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+				       int vport_index)
+{
+	struct mlx5_esw_offload *offloads = &esw->offloads;
+	struct mlx5_eswitch_rep *rep;
+
+	rep = &offloads->vport_reps[vport_index];
+
+	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
+		rep->unload(esw, rep);
+
+	rep->valid = false;
+}
+
+struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+{
+#define UPLINK_REP_INDEX 0
+	struct mlx5_esw_offload *offloads = &esw->offloads;
+	struct mlx5_eswitch_rep *rep;
+
+	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
+	return rep->netdev;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/sriov/sriov.c b/drivers/net/ethernet/mellanox/mlx5/sriov/sriov.c
new file mode 100644
index 0000000..1d8c097
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/sriov/sriov.c
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include "lib/driver.h"
+#include "mlx5.h"
+#ifdef CONFIG_MLX5_CORE_EN
+#include "sriov/eswitch.h"
+#endif
+
+bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	return !!sriov->num_vfs;
+}
+
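+/* Device-level SRIOV enablement: bring the eswitch up in legacy mode
+ * and enable the HCA of each VF function.
+ */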
+static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int err;
+	int vf;
+
+	if (sriov->enabled_vfs) {
+		mlx5_core_warn(dev,
+			       "failed to enable SRIOV on device, already enabled with %d vfs\n",
+			       sriov->enabled_vfs);
+		return -EBUSY;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
+	if (err) {
+		mlx5_core_warn(dev,
+			       "failed to enable eswitch SRIOV (%d)\n", err);
+		return err;
+	}
+#endif
+
+	for (vf = 0; vf < num_vfs; vf++) {
+		err = mlx5_core_enable_hca(dev, vf + 1);
+		if (err) {
+			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
+			continue;
+		}
+		sriov->vfs_ctx[vf].enabled = 1;
+		sriov->enabled_vfs++;
+		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
+
+	}
+
+	return 0;
+}
+
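+/* Reverse of mlx5_device_enable_sriov(): disable each enabled VF HCA,
+ * take the eswitch down and wait for the VF pages to be reclaimed.
+ */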
+static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int err;
+	int vf;
+
+	if (!sriov->enabled_vfs)
+		return;
+
+	for (vf = 0; vf < sriov->num_vfs; vf++) {
+		if (!sriov->vfs_ctx[vf].enabled)
+			continue;
+		err = mlx5_core_disable_hca(dev, vf + 1);
+		if (err) {
+			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
+			continue;
+		}
+		sriov->vfs_ctx[vf].enabled = 0;
+		sriov->enabled_vfs--;
+	}
+
+#ifdef CONFIG_MLX5_CORE_EN
+	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+#endif
+
+	if (mlx5_wait_for_vf_pages(dev))
+		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
+}
+
+static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	int err = 0;
+
+	if (pci_num_vf(pdev)) {
+		mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
+		return -EBUSY;
+	}
+
+	err = pci_enable_sriov(pdev, num_vfs);
+	if (err)
+		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
+
+	return err;
+}
+
+static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
+{
+	pci_disable_sriov(pdev);
+}
+
+static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	int err = 0;
+
+	err = mlx5_device_enable_sriov(dev, num_vfs);
+	if (err) {
+		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
+		return err;
+	}
+
+	err = mlx5_pci_enable_sriov(pdev, num_vfs);
+	if (err) {
+		mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+		mlx5_device_disable_sriov(dev);
+		return err;
+	}
+
+	sriov->num_vfs = num_vfs;
+
+	return 0;
+}
+
+static void mlx5_sriov_disable(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	mlx5_pci_disable_sriov(pdev);
+	mlx5_device_disable_sriov(dev);
+	sriov->num_vfs = 0;
+}
+
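+/* PCI sriov_configure callback (sysfs sriov_numvfs): a non-zero
+ * num_vfs enables SRIOV, zero disables it.
+ */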
+int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+	int err = 0;
+
+	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
+	if (!mlx5_core_is_pf(dev))
+		return -EPERM;
+
+	if (num_vfs && mlx5_lag_is_active(dev)) {
+		mlx5_core_warn(dev, "can't turn sriov on while LAG is active");
+		return -EINVAL;
+	}
+
+	if (num_vfs)
+		err = mlx5_sriov_enable(pdev, num_vfs);
+	else
+		mlx5_sriov_disable(pdev);
+
+	return err ? err : num_vfs;
+}
+
+int mlx5_sriov_attach(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	if (!mlx5_core_is_pf(dev) || !sriov->num_vfs)
+		return 0;
+
+	/* If sriov VFs exist in PCI level, enable them in device level */
+	return mlx5_device_enable_sriov(dev, sriov->num_vfs);
+}
+
+void mlx5_sriov_detach(struct mlx5_core_dev *dev)
+{
+	if (!mlx5_core_is_pf(dev))
+		return;
+
+	mlx5_device_disable_sriov(dev);
+}
+
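+/* Allocate the per-VF context array for the maximal number of VFs the
+ * device can expose.
+ */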
+int mlx5_sriov_init(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+	struct pci_dev *pdev = dev->pdev;
+	int total_vfs;
+
+	if (!mlx5_core_is_pf(dev))
+		return 0;
+
+	total_vfs = pci_sriov_get_totalvfs(pdev);
+	sriov->num_vfs = pci_num_vf(pdev);
+	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
+	if (!sriov->vfs_ctx)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
+{
+	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+
+	if (!mlx5_core_is_pf(dev))
+		return;
+
+	kfree(sriov->vfs_ctx);
+}
diff --git a/include/linux/mlx5/cmd.h b/include/linux/mlx5/cmd.h
deleted file mode 100644
index 68cd08f..0000000
--- a/include/linux/mlx5/cmd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_CMD_H
-#define MLX5_CMD_H
-
-#include <linux/types.h>
-
-struct manage_pages_layout {
-	u64	ptr;
-	u32	reserved;
-	u16	num_entries;
-	u16	func_id;
-};
-
-
-struct mlx5_cmd_alloc_uar_imm_out {
-	u32	rsvd[3];
-	u32	uarn;
-};
-
-#endif /* MLX5_CMD_H */
diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h
deleted file mode 100644
index 9589884..0000000
--- a/include/linux/mlx5/cq.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_CORE_CQ_H
-#define MLX5_CORE_CQ_H
-
-#include <rdma/ib_verbs.h>
-#include <linux/mlx5/driver.h>
-
-
-struct mlx5_core_cq {
-	u32			cqn;
-	int			cqe_sz;
-	__be32		       *set_ci_db;
-	__be32		       *arm_db;
-	struct mlx5_uars_page  *uar;
-	atomic_t		refcount;
-	struct completion	free;
-	unsigned		vector;
-	unsigned int		irqn;
-	void (*comp)		(struct mlx5_core_cq *);
-	void (*event)		(struct mlx5_core_cq *, enum mlx5_event);
-	u32			cons_index;
-	unsigned		arm_sn;
-	struct mlx5_rsc_debug	*dbg;
-	int			pid;
-	struct {
-		struct list_head list;
-		void (*comp)(struct mlx5_core_cq *);
-		void		*priv;
-	} tasklet_ctx;
-	int			reset_notify_added;
-	struct list_head	reset_notify;
-};
-
-
-enum {
-	MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR		= 0x01,
-	MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR		= 0x02,
-	MLX5_CQE_SYNDROME_LOCAL_PROT_ERR		= 0x04,
-	MLX5_CQE_SYNDROME_WR_FLUSH_ERR			= 0x05,
-	MLX5_CQE_SYNDROME_MW_BIND_ERR			= 0x06,
-	MLX5_CQE_SYNDROME_BAD_RESP_ERR			= 0x10,
-	MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR		= 0x11,
-	MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR		= 0x12,
-	MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR		= 0x13,
-	MLX5_CQE_SYNDROME_REMOTE_OP_ERR			= 0x14,
-	MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR	= 0x15,
-	MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR		= 0x16,
-	MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR		= 0x22,
-};
-
-enum {
-	MLX5_CQE_OWNER_MASK	= 1,
-	MLX5_CQE_REQ		= 0,
-	MLX5_CQE_RESP_WR_IMM	= 1,
-	MLX5_CQE_RESP_SEND	= 2,
-	MLX5_CQE_RESP_SEND_IMM	= 3,
-	MLX5_CQE_RESP_SEND_INV	= 4,
-	MLX5_CQE_RESIZE_CQ	= 5,
-	MLX5_CQE_SIG_ERR	= 12,
-	MLX5_CQE_REQ_ERR	= 13,
-	MLX5_CQE_RESP_ERR	= 14,
-	MLX5_CQE_INVALID	= 15,
-};
-
-enum {
-	MLX5_CQ_MODIFY_PERIOD	= 1 << 0,
-	MLX5_CQ_MODIFY_COUNT	= 1 << 1,
-	MLX5_CQ_MODIFY_OVERRUN	= 1 << 2,
-};
-
-enum {
-	MLX5_CQ_OPMOD_RESIZE		= 1,
-	MLX5_MODIFY_CQ_MASK_LOG_SIZE	= 1 << 0,
-	MLX5_MODIFY_CQ_MASK_PG_OFFSET	= 1 << 1,
-	MLX5_MODIFY_CQ_MASK_PG_SIZE	= 1 << 2,
-};
-
-struct mlx5_cq_modify_params {
-	int	type;
-	union {
-		struct {
-			u32	page_offset;
-			u8	log_cq_size;
-		} resize;
-
-		struct {
-		} moder;
-
-		struct {
-		} mapping;
-	} params;
-};
-
-enum {
-	CQE_SIZE_64 = 0,
-	CQE_SIZE_128 = 1,
-};
-
-static inline int cqe_sz_to_mlx_sz(u8 size)
-{
-	return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
-}
-
-static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
-{
-	*cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff);
-}
-
-enum {
-	MLX5_CQ_DB_REQ_NOT_SOL		= 1 << 24,
-	MLX5_CQ_DB_REQ_NOT		= 0 << 24
-};
-
-static inline void mlx5_cq_arm(struct mlx5_core_cq *cq, u32 cmd,
-			       void __iomem *uar_page,
-			       u32 cons_index)
-{
-	__be32 doorbell[2];
-	u32 sn;
-	u32 ci;
-
-	sn = cq->arm_sn & 3;
-	ci = cons_index & 0xffffff;
-
-	*cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci);
-
-	/* Make sure that the doorbell record in host memory is
-	 * written before ringing the doorbell via PCI MMIO.
-	 */
-	wmb();
-
-	doorbell[0] = cpu_to_be32(sn << 28 | cmd | ci);
-	doorbell[1] = cpu_to_be32(cq->cqn);
-
-	mlx5_write64(doorbell, uar_page + MLX5_CQ_DOORBELL, NULL);
-}
-
-int mlx5_init_cq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen);
-int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
-int mlx5_core_query_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-		       u32 *out, int outlen);
-int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
-			u32 *in, int inlen);
-int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
-				   struct mlx5_core_cq *cq, u16 cq_period,
-				   u16 cq_max_count);
-int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
-void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq);
-
-#endif /* MLX5_CORE_CQ_H */
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
deleted file mode 100644
index dd345e8..0000000
--- a/include/linux/mlx5/device.h
+++ /dev/null
@@ -1,1100 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_DEVICE_H
-#define MLX5_DEVICE_H
-
-#include <linux/types.h>
-#include <rdma/ib_verbs.h>
-#include <linux/mlx5/mlx5_ifc.h>
-
-#if defined(__LITTLE_ENDIAN)
-#define MLX5_SET_HOST_ENDIANNESS	0
-#elif defined(__BIG_ENDIAN)
-#define MLX5_SET_HOST_ENDIANNESS	0x80
-#else
-#error Host endianness not defined
-#endif
-
-/* helper macros */
-#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
-#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
-#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
-#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
-#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
-#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
-#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
-#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
-
-#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
-#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
-#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
-#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
-#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
-#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
-#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
-#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
-
-/* insert a value to a struct */
-#define MLX5_SET(typ, p, fld, v) do { \
-	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
-	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
-	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
-		     (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
-		     << __mlx5_dw_bit_off(typ, fld))); \
-} while (0)
-
-#define MLX5_SET_TO_ONES(typ, p, fld) do { \
-	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32);             \
-	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
-	cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
-		     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
-		     << __mlx5_dw_bit_off(typ, fld))); \
-} while (0)
-
-#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
-__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
-__mlx5_mask(typ, fld))
-
-#define MLX5_GET_PR(typ, p, fld) ({ \
-	u32 ___t = MLX5_GET(typ, p, fld); \
-	pr_debug(#fld " = 0x%x\n", ___t); \
-	___t; \
-})
-
-#define __MLX5_SET64(typ, p, fld, v) do { \
-	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
-	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
-} while (0)
-
-#define MLX5_SET64(typ, p, fld, v) do { \
-	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
-	__MLX5_SET64(typ, p, fld, v); \
-} while (0)
-
-#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
-	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
-	__MLX5_SET64(typ, p, fld[idx], v); \
-} while (0)
-
-#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
-
-#define MLX5_GET64_PR(typ, p, fld) ({ \
-	u64 ___t = MLX5_GET64(typ, p, fld); \
-	pr_debug(#fld " = 0x%llx\n", ___t); \
-	___t; \
-})
-
-/* Big endian getters */
-#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\
-	__mlx5_64_off(typ, fld)))
-
-#define MLX5_GET_BE(type_t, typ, p, fld) ({				  \
-		type_t tmp;						  \
-		switch (sizeof(tmp)) {					  \
-		case sizeof(u8):					  \
-			tmp = (__force type_t)MLX5_GET(typ, p, fld);	  \
-			break;						  \
-		case sizeof(u16):					  \
-			tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
-			break;						  \
-		case sizeof(u32):					  \
-			tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
-			break;						  \
-		case sizeof(u64):					  \
-			tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
-			break;						  \
-			}						  \
-		tmp;							  \
-		})
-
-enum mlx5_inline_modes {
-	MLX5_INLINE_MODE_NONE,
-	MLX5_INLINE_MODE_L2,
-	MLX5_INLINE_MODE_IP,
-	MLX5_INLINE_MODE_TCP_UDP,
-};
-
-enum {
-	MLX5_MAX_COMMANDS		= 32,
-	MLX5_CMD_DATA_BLOCK_SIZE	= 512,
-	MLX5_PCI_CMD_XPORT		= 7,
-	MLX5_MKEY_BSF_OCTO_SIZE		= 4,
-	MLX5_MAX_PSVS			= 4,
-};
-
-enum {
-	MLX5_EXTENDED_UD_AV		= 0x80000000,
-};
-
-enum {
-	MLX5_CQ_STATE_ARMED		= 9,
-	MLX5_CQ_STATE_ALWAYS_ARMED	= 0xb,
-	MLX5_CQ_STATE_FIRED		= 0xa,
-};
-
-enum {
-	MLX5_STAT_RATE_OFFSET	= 5,
-};
-
-enum {
-	MLX5_INLINE_SEG = 0x80000000,
-};
-
-enum {
-	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,
-};
-
-enum {
-	MLX5_MIN_PKEY_TABLE_SIZE = 128,
-	MLX5_MAX_LOG_PKEY_TABLE  = 5,
-};
-
-enum {
-	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
-};
-
-enum {
-	MLX5_PFAULT_SUBTYPE_WQE = 0,
-	MLX5_PFAULT_SUBTYPE_RDMA = 1,
-};
-
-enum {
-	MLX5_PERM_LOCAL_READ	= 1 << 2,
-	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
-	MLX5_PERM_REMOTE_READ	= 1 << 4,
-	MLX5_PERM_REMOTE_WRITE	= 1 << 5,
-	MLX5_PERM_ATOMIC	= 1 << 6,
-	MLX5_PERM_UMR_EN	= 1 << 7,
-};
-
-enum {
-	MLX5_PCIE_CTRL_SMALL_FENCE	= 1 << 0,
-	MLX5_PCIE_CTRL_RELAXED_ORDERING	= 1 << 2,
-	MLX5_PCIE_CTRL_NO_SNOOP		= 1 << 3,
-	MLX5_PCIE_CTRL_TLP_PROCE_EN	= 1 << 6,
-	MLX5_PCIE_CTRL_TPH_MASK		= 3 << 4,
-};
-
-enum {
-	MLX5_EN_RD	= (u64)1,
-	MLX5_EN_WR	= (u64)2
-};
-
-enum {
-	MLX5_ADAPTER_PAGE_SHIFT		= 12,
-	MLX5_ADAPTER_PAGE_SIZE		= 1 << MLX5_ADAPTER_PAGE_SHIFT,
-};
-
-enum {
-	MLX5_BFREGS_PER_UAR		= 4,
-	MLX5_MAX_UARS			= 1 << 8,
-	MLX5_NON_FP_BFREGS_PER_UAR	= 2,
-	MLX5_FP_BFREGS_PER_UAR		= MLX5_BFREGS_PER_UAR -
-					  MLX5_NON_FP_BFREGS_PER_UAR,
-	MLX5_MAX_BFREGS			= MLX5_MAX_UARS *
-					  MLX5_NON_FP_BFREGS_PER_UAR,
-	MLX5_UARS_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
-	MLX5_NON_FP_BFREGS_IN_PAGE	= MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
-};
-
-enum {
-	MLX5_MKEY_MASK_LEN		= 1ull << 0,
-	MLX5_MKEY_MASK_PAGE_SIZE	= 1ull << 1,
-	MLX5_MKEY_MASK_START_ADDR	= 1ull << 6,
-	MLX5_MKEY_MASK_PD		= 1ull << 7,
-	MLX5_MKEY_MASK_EN_RINVAL	= 1ull << 8,
-	MLX5_MKEY_MASK_EN_SIGERR	= 1ull << 9,
-	MLX5_MKEY_MASK_BSF_EN		= 1ull << 12,
-	MLX5_MKEY_MASK_KEY		= 1ull << 13,
-	MLX5_MKEY_MASK_QPN		= 1ull << 14,
-	MLX5_MKEY_MASK_LR		= 1ull << 17,
-	MLX5_MKEY_MASK_LW		= 1ull << 18,
-	MLX5_MKEY_MASK_RR		= 1ull << 19,
-	MLX5_MKEY_MASK_RW		= 1ull << 20,
-	MLX5_MKEY_MASK_A		= 1ull << 21,
-	MLX5_MKEY_MASK_SMALL_FENCE	= 1ull << 23,
-	MLX5_MKEY_MASK_FREE		= 1ull << 29,
-};
-
-enum {
-	MLX5_UMR_TRANSLATION_OFFSET_EN	= (1 << 4),
-
-	MLX5_UMR_CHECK_NOT_FREE		= (1 << 5),
-	MLX5_UMR_CHECK_FREE		= (2 << 5),
-
-	MLX5_UMR_INLINE			= (1 << 7),
-};
-
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_MASK      (MLX5_UMR_MTT_ALIGNMENT - 1)
-#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
-
-#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
-
-enum {
-	MLX5_EVENT_QUEUE_TYPE_QP = 0,
-	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
-	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
-};
-
-enum mlx5_event {
-	MLX5_EVENT_TYPE_COMP		   = 0x0,
-
-	MLX5_EVENT_TYPE_PATH_MIG	   = 0x01,
-	MLX5_EVENT_TYPE_COMM_EST	   = 0x02,
-	MLX5_EVENT_TYPE_SQ_DRAINED	   = 0x03,
-	MLX5_EVENT_TYPE_SRQ_LAST_WQE	   = 0x13,
-	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT	   = 0x14,
-
-	MLX5_EVENT_TYPE_CQ_ERROR	   = 0x04,
-	MLX5_EVENT_TYPE_WQ_CATAS_ERROR	   = 0x05,
-	MLX5_EVENT_TYPE_PATH_MIG_FAILED	   = 0x07,
-	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
-	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR	   = 0x11,
-	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR	   = 0x12,
-
-	MLX5_EVENT_TYPE_INTERNAL_ERROR	   = 0x08,
-	MLX5_EVENT_TYPE_PORT_CHANGE	   = 0x09,
-	MLX5_EVENT_TYPE_GPIO_EVENT	   = 0x15,
-	MLX5_EVENT_TYPE_PORT_MODULE_EVENT  = 0x16,
-	MLX5_EVENT_TYPE_REMOTE_CONFIG	   = 0x19,
-
-	MLX5_EVENT_TYPE_DB_BF_CONGESTION   = 0x1a,
-	MLX5_EVENT_TYPE_STALL_EVENT	   = 0x1b,
-
-	MLX5_EVENT_TYPE_CMD		   = 0x0a,
-	MLX5_EVENT_TYPE_PAGE_REQUEST	   = 0xb,
-
-	MLX5_EVENT_TYPE_PAGE_FAULT	   = 0xc,
-	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE   = 0xd,
-};
-
-enum {
-	MLX5_PORT_CHANGE_SUBTYPE_DOWN		= 1,
-	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE		= 4,
-	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED	= 5,
-	MLX5_PORT_CHANGE_SUBTYPE_LID		= 6,
-	MLX5_PORT_CHANGE_SUBTYPE_PKEY		= 7,
-	MLX5_PORT_CHANGE_SUBTYPE_GUID		= 8,
-	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG	= 9,
-};
-
-enum {
-	MLX5_DEV_CAP_FLAG_XRC		= 1LL <<  3,
-	MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
-	MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
-	MLX5_DEV_CAP_FLAG_APM		= 1LL << 17,
-	MLX5_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
-	MLX5_DEV_CAP_FLAG_BLOCK_MCAST	= 1LL << 23,
-	MLX5_DEV_CAP_FLAG_ON_DMND_PG	= 1LL << 24,
-	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
-	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
-	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
-	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
-	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
-};
-
-enum {
-	MLX5_ROCE_VERSION_1		= 0,
-	MLX5_ROCE_VERSION_2		= 2,
-};
-
-enum {
-	MLX5_ROCE_VERSION_1_CAP		= 1 << MLX5_ROCE_VERSION_1,
-	MLX5_ROCE_VERSION_2_CAP		= 1 << MLX5_ROCE_VERSION_2,
-};
-
-enum {
-	MLX5_ROCE_L3_TYPE_IPV4		= 0,
-	MLX5_ROCE_L3_TYPE_IPV6		= 1,
-};
-
-enum {
-	MLX5_ROCE_L3_TYPE_IPV4_CAP	= 1 << 1,
-	MLX5_ROCE_L3_TYPE_IPV6_CAP	= 1 << 2,
-};
-
-enum {
-	MLX5_OPCODE_NOP			= 0x00,
-	MLX5_OPCODE_SEND_INVAL		= 0x01,
-	MLX5_OPCODE_RDMA_WRITE		= 0x08,
-	MLX5_OPCODE_RDMA_WRITE_IMM	= 0x09,
-	MLX5_OPCODE_SEND		= 0x0a,
-	MLX5_OPCODE_SEND_IMM		= 0x0b,
-	MLX5_OPCODE_LSO			= 0x0e,
-	MLX5_OPCODE_RDMA_READ		= 0x10,
-	MLX5_OPCODE_ATOMIC_CS		= 0x11,
-	MLX5_OPCODE_ATOMIC_FA		= 0x12,
-	MLX5_OPCODE_ATOMIC_MASKED_CS	= 0x14,
-	MLX5_OPCODE_ATOMIC_MASKED_FA	= 0x15,
-	MLX5_OPCODE_BIND_MW		= 0x18,
-	MLX5_OPCODE_CONFIG_CMD		= 0x1f,
-
-	MLX5_RECV_OPCODE_RDMA_WRITE_IMM	= 0x00,
-	MLX5_RECV_OPCODE_SEND		= 0x01,
-	MLX5_RECV_OPCODE_SEND_IMM	= 0x02,
-	MLX5_RECV_OPCODE_SEND_INVAL	= 0x03,
-
-	MLX5_CQE_OPCODE_ERROR		= 0x1e,
-	MLX5_CQE_OPCODE_RESIZE		= 0x16,
-
-	MLX5_OPCODE_SET_PSV		= 0x20,
-	MLX5_OPCODE_GET_PSV		= 0x21,
-	MLX5_OPCODE_CHECK_PSV		= 0x22,
-	MLX5_OPCODE_RGET_PSV		= 0x26,
-	MLX5_OPCODE_RCHECK_PSV		= 0x27,
-
-	MLX5_OPCODE_UMR			= 0x25,
-
-};
-
-enum {
-	MLX5_SET_PORT_RESET_QKEY	= 0,
-	MLX5_SET_PORT_GUID0		= 16,
-	MLX5_SET_PORT_NODE_GUID		= 17,
-	MLX5_SET_PORT_SYS_GUID		= 18,
-	MLX5_SET_PORT_GID_TABLE		= 19,
-	MLX5_SET_PORT_PKEY_TABLE	= 20,
-};
-
-enum {
-	MLX5_BW_NO_LIMIT   = 0,
-	MLX5_100_MBPS_UNIT = 3,
-	MLX5_GBPS_UNIT	   = 4,
-};
-
-enum {
-	MLX5_MAX_PAGE_SHIFT		= 31
-};
-
-enum {
-	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
-};
-
-enum {
-	/*
-	 * Max wqe size for rdma read is 512 bytes, so this
-	 * limits our max_sge_rd as the wqe needs to fit:
-	 * - ctrl segment (16 bytes)
-	 * - rdma segment (16 bytes)
-	 * - scatter elements (16 bytes each)
-	 */
-	MLX5_MAX_SGE_RD	= (512 - 16 - 16) / 16
-};
-
-enum mlx5_odp_transport_cap_bits {
-	MLX5_ODP_SUPPORT_SEND	 = 1 << 31,
-	MLX5_ODP_SUPPORT_RECV	 = 1 << 30,
-	MLX5_ODP_SUPPORT_WRITE	 = 1 << 29,
-	MLX5_ODP_SUPPORT_READ	 = 1 << 28,
-};
-
-struct mlx5_odp_caps {
-	char reserved[0x10];
-	struct {
-		__be32			rc_odp_caps;
-		__be32			uc_odp_caps;
-		__be32			ud_odp_caps;
-	} per_transport_caps;
-	char reserved2[0xe4];
-};
-
-struct mlx5_cmd_layout {
-	u8		type;
-	u8		rsvd0[3];
-	__be32		inlen;
-	__be64		in_ptr;
-	__be32		in[4];
-	__be32		out[4];
-	__be64		out_ptr;
-	__be32		outlen;
-	u8		token;
-	u8		sig;
-	u8		rsvd1;
-	u8		status_own;
-};
-
-struct health_buffer {
-	__be32		assert_var[5];
-	__be32		rsvd0[3];
-	__be32		assert_exit_ptr;
-	__be32		assert_callra;
-	__be32		rsvd1[2];
-	__be32		fw_ver;
-	__be32		hw_id;
-	__be32		rsvd2;
-	u8		irisc_index;
-	u8		synd;
-	__be16		ext_synd;
-};
-
-struct mlx5_init_seg {
-	__be32			fw_rev;
-	__be32			cmdif_rev_fw_sub;
-	__be32			rsvd0[2];
-	__be32			cmdq_addr_h;
-	__be32			cmdq_addr_l_sz;
-	__be32			cmd_dbell;
-	__be32			rsvd1[120];
-	__be32			initializing;
-	struct health_buffer	health;
-	__be32			rsvd2[880];
-	__be32			internal_timer_h;
-	__be32			internal_timer_l;
-	__be32			rsvd3[2];
-	__be32			health_counter;
-	__be32			rsvd4[1019];
-	__be64			ieee1588_clk;
-	__be32			ieee1588_clk_type;
-	__be32			clr_intx;
-};
-
-struct mlx5_eqe_comp {
-	__be32	reserved[6];
-	__be32	cqn;
-};
-
-struct mlx5_eqe_qp_srq {
-	__be32	reserved1[5];
-	u8	type;
-	u8	reserved2[3];
-	__be32	qp_srq_n;
-};
-
-struct mlx5_eqe_cq_err {
-	__be32	cqn;
-	u8	reserved1[7];
-	u8	syndrome;
-};
-
-struct mlx5_eqe_port_state {
-	u8	reserved0[8];
-	u8	port;
-};
-
-struct mlx5_eqe_gpio {
-	__be32	reserved0[2];
-	__be64	gpio_event;
-};
-
-struct mlx5_eqe_congestion {
-	u8	type;
-	u8	rsvd0;
-	u8	congestion_level;
-};
-
-struct mlx5_eqe_stall_vl {
-	u8	rsvd0[3];
-	u8	port_vl;
-};
-
-struct mlx5_eqe_cmd {
-	__be32	vector;
-	__be32	rsvd[6];
-};
-
-struct mlx5_eqe_page_req {
-	u8		rsvd0[2];
-	__be16		func_id;
-	__be32		num_pages;
-	__be32		rsvd1[5];
-};
-
-struct mlx5_eqe_page_fault {
-	__be32 bytes_committed;
-	union {
-		struct {
-			u16     reserved1;
-			__be16  wqe_index;
-			u16	reserved2;
-			__be16  packet_length;
-			__be32  token;
-			u8	reserved4[8];
-			__be32  pftype_wq;
-		} __packed wqe;
-		struct {
-			__be32  r_key;
-			u16	reserved1;
-			__be16  packet_length;
-			__be32  rdma_op_len;
-			__be64  rdma_va;
-			__be32  pftype_token;
-		} __packed rdma;
-	} __packed;
-} __packed;
-
-struct mlx5_eqe_vport_change {
-	u8		rsvd0[2];
-	__be16		vport_num;
-	__be32		rsvd1[6];
-} __packed;
-
-struct mlx5_eqe_port_module {
-	u8        reserved_at_0[1];
-	u8        module;
-	u8        reserved_at_2[1];
-	u8        module_status;
-	u8        reserved_at_4[2];
-	u8        error_type;
-} __packed;
-
-union ev_data {
-	__be32				raw[7];
-	struct mlx5_eqe_cmd		cmd;
-	struct mlx5_eqe_comp		comp;
-	struct mlx5_eqe_qp_srq		qp_srq;
-	struct mlx5_eqe_cq_err		cq_err;
-	struct mlx5_eqe_port_state	port;
-	struct mlx5_eqe_gpio		gpio;
-	struct mlx5_eqe_congestion	cong;
-	struct mlx5_eqe_stall_vl	stall_vl;
-	struct mlx5_eqe_page_req	req_pages;
-	struct mlx5_eqe_page_fault	page_fault;
-	struct mlx5_eqe_vport_change	vport_change;
-	struct mlx5_eqe_port_module	port_module;
-} __packed;
-
-struct mlx5_eqe {
-	u8		rsvd0;
-	u8		type;
-	u8		rsvd1;
-	u8		sub_type;
-	__be32		rsvd2[7];
-	union ev_data	data;
-	__be16		rsvd3;
-	u8		signature;
-	u8		owner;
-} __packed;
-
-struct mlx5_cmd_prot_block {
-	u8		data[MLX5_CMD_DATA_BLOCK_SIZE];
-	u8		rsvd0[48];
-	__be64		next;
-	__be32		block_num;
-	u8		rsvd1;
-	u8		token;
-	u8		ctrl_sig;
-	u8		sig;
-};
-
-enum {
-	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
-};
-
-struct mlx5_err_cqe {
-	u8	rsvd0[32];
-	__be32	srqn;
-	u8	rsvd1[18];
-	u8	vendor_err_synd;
-	u8	syndrome;
-	__be32	s_wqe_opcode_qpn;
-	__be16	wqe_counter;
-	u8	signature;
-	u8	op_own;
-};
-
-struct mlx5_cqe64 {
-	u8		outer_l3_tunneled;
-	u8		rsvd0;
-	__be16		wqe_id;
-	u8		lro_tcppsh_abort_dupack;
-	u8		lro_min_ttl;
-	__be16		lro_tcp_win;
-	__be32		lro_ack_seq_num;
-	__be32		rss_hash_result;
-	u8		rss_hash_type;
-	u8		ml_path;
-	u8		rsvd20[2];
-	__be16		check_sum;
-	__be16		slid;
-	__be32		flags_rqpn;
-	u8		hds_ip_ext;
-	u8		l4_l3_hdr_type;
-	__be16		vlan_info;
-	__be32		srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
-	__be32		imm_inval_pkey;
-	u8		rsvd40[4];
-	__be32		byte_cnt;
-	__be32		timestamp_h;
-	__be32		timestamp_l;
-	__be32		sop_drop_qpn;
-	__be16		wqe_counter;
-	u8		signature;
-	u8		op_own;
-};
-
-struct mlx5_mini_cqe8 {
-	union {
-		__be32 rx_hash_result;
-		struct {
-			__be16 checksum;
-			__be16 rsvd;
-		};
-		struct {
-			__be16 wqe_counter;
-			u8  s_wqe_opcode;
-			u8  reserved;
-		} s_wqe_info;
-	};
-	__be32 byte_cnt;
-};
-
-enum {
-	MLX5_NO_INLINE_DATA,
-	MLX5_INLINE_DATA32_SEG,
-	MLX5_INLINE_DATA64_SEG,
-	MLX5_COMPRESSED,
-};
-
-enum {
-	MLX5_CQE_FORMAT_CSUM = 0x1,
-};
-
-#define MLX5_MINI_CQE_ARRAY_SIZE 8
-
-static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
-{
-	return (cqe->op_own >> 2) & 0x3;
-}
-
-static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
-{
-	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
-}
-
-static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
-{
-	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
-}
-
-static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
-{
-	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
-}
-
-static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
-{
-	return cqe->outer_l3_tunneled & 0x1;
-}
-
-static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
-{
-	return !!(cqe->l4_l3_hdr_type & 0x1);
-}
-
-static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
-{
-	u32 hi, lo;
-
-	hi = be32_to_cpu(cqe->timestamp_h);
-	lo = be32_to_cpu(cqe->timestamp_l);
-
-	return (u64)lo | ((u64)hi << 32);
-}
-
-struct mpwrq_cqe_bc {
-	__be16	filler_consumed_strides;
-	__be16	byte_cnt;
-};
-
-static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
-{
-	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
-
-	return be16_to_cpu(bc->byte_cnt);
-}
-
-static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
-{
-	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
-}
-
-static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
-{
-	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
-
-	return mpwrq_get_cqe_bc_consumed_strides(bc);
-}
-
-static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
-{
-	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
-
-	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
-}
-
-static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
-{
-	return be16_to_cpu(cqe->wqe_counter);
-}
-
-enum {
-	CQE_L4_HDR_TYPE_NONE			= 0x0,
-	CQE_L4_HDR_TYPE_TCP_NO_ACK		= 0x1,
-	CQE_L4_HDR_TYPE_UDP			= 0x2,
-	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA		= 0x3,
-	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA	= 0x4,
-};
-
-enum {
-	CQE_RSS_HTYPE_IP	= 0x3 << 6,
-	CQE_RSS_HTYPE_L4	= 0x3 << 2,
-};
-
-enum {
-	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH	= 0x0,
-	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6	= 0x1,
-	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4	= 0x2,
-};
-
-enum {
-	CQE_L2_OK	= 1 << 0,
-	CQE_L3_OK	= 1 << 1,
-	CQE_L4_OK	= 1 << 2,
-};
-
-struct mlx5_sig_err_cqe {
-	u8		rsvd0[16];
-	__be32		expected_trans_sig;
-	__be32		actual_trans_sig;
-	__be32		expected_reftag;
-	__be32		actual_reftag;
-	__be16		syndrome;
-	u8		rsvd22[2];
-	__be32		mkey;
-	__be64		err_offset;
-	u8		rsvd30[8];
-	__be32		qpn;
-	u8		rsvd38[2];
-	u8		signature;
-	u8		op_own;
-};
-
-struct mlx5_wqe_srq_next_seg {
-	u8			rsvd0[2];
-	__be16			next_wqe_index;
-	u8			signature;
-	u8			rsvd1[11];
-};
-
-union mlx5_ext_cqe {
-	struct ib_grh	grh;
-	u8		inl[64];
-};
-
-struct mlx5_cqe128 {
-	union mlx5_ext_cqe	inl_grh;
-	struct mlx5_cqe64	cqe64;
-};
-
-enum {
-	MLX5_MKEY_STATUS_FREE = 1 << 6,
-};
-
-enum {
-	MLX5_MKEY_REMOTE_INVAL	= 1 << 24,
-	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
-	MLX5_MKEY_BSF_EN	= 1 << 30,
-	MLX5_MKEY_LEN64		= 1 << 31,
-};
-
-struct mlx5_mkey_seg {
-	/* This is a two bit field occupying bits 31-30.
-	 * bit 31 is always 0,
-	 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation
-	 */
-	u8		status;
-	u8		pcie_control;
-	u8		flags;
-	u8		version;
-	__be32		qpn_mkey7_0;
-	u8		rsvd1[4];
-	__be32		flags_pd;
-	__be64		start_addr;
-	__be64		len;
-	__be32		bsfs_octo_size;
-	u8		rsvd2[16];
-	__be32		xlt_oct_size;
-	u8		rsvd3[3];
-	u8		log2_page_size;
-	u8		rsvd4[4];
-};
-
-#define MLX5_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
-
-enum {
-	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 <<  0
-};
-
-enum {
-	VPORT_STATE_DOWN		= 0x0,
-	VPORT_STATE_UP			= 0x1,
-};
-
-enum {
-	MLX5_ESW_VPORT_ADMIN_STATE_DOWN  = 0x0,
-	MLX5_ESW_VPORT_ADMIN_STATE_UP    = 0x1,
-	MLX5_ESW_VPORT_ADMIN_STATE_AUTO  = 0x2,
-};
-
-enum {
-	MLX5_L3_PROT_TYPE_IPV4		= 0,
-	MLX5_L3_PROT_TYPE_IPV6		= 1,
-};
-
-enum {
-	MLX5_L4_PROT_TYPE_TCP		= 0,
-	MLX5_L4_PROT_TYPE_UDP		= 1,
-};
-
-enum {
-	MLX5_HASH_FIELD_SEL_SRC_IP	= 1 << 0,
-	MLX5_HASH_FIELD_SEL_DST_IP	= 1 << 1,
-	MLX5_HASH_FIELD_SEL_L4_SPORT	= 1 << 2,
-	MLX5_HASH_FIELD_SEL_L4_DPORT	= 1 << 3,
-	MLX5_HASH_FIELD_SEL_IPSEC_SPI	= 1 << 4,
-};
-
-enum {
-	MLX5_MATCH_OUTER_HEADERS	= 1 << 0,
-	MLX5_MATCH_MISC_PARAMETERS	= 1 << 1,
-	MLX5_MATCH_INNER_HEADERS	= 1 << 2,
-
-};
-
-enum {
-	MLX5_FLOW_TABLE_TYPE_NIC_RCV	= 0,
-	MLX5_FLOW_TABLE_TYPE_ESWITCH	= 4,
-};
-
-enum {
-	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT	= 0,
-	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE	= 1,
-	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR		= 2,
-};
-
-enum mlx5_list_type {
-	MLX5_NVPRT_LIST_TYPE_UC   = 0x0,
-	MLX5_NVPRT_LIST_TYPE_MC   = 0x1,
-	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
-};
-
-enum {
-	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
-	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM    = 0x1,
-};
-
-enum mlx5_wol_mode {
-	MLX5_WOL_DISABLE        = 0,
-	MLX5_WOL_SECURED_MAGIC  = 1 << 1,
-	MLX5_WOL_MAGIC          = 1 << 2,
-	MLX5_WOL_ARP            = 1 << 3,
-	MLX5_WOL_BROADCAST      = 1 << 4,
-	MLX5_WOL_MULTICAST      = 1 << 5,
-	MLX5_WOL_UNICAST        = 1 << 6,
-	MLX5_WOL_PHY_ACTIVITY   = 1 << 7,
-};
-
-/* MLX5 DEV CAPs */
-
-/* TODO: EAT.ME */
-enum mlx5_cap_mode {
-	HCA_CAP_OPMOD_GET_MAX	= 0,
-	HCA_CAP_OPMOD_GET_CUR	= 1,
-};
-
-enum mlx5_cap_type {
-	MLX5_CAP_GENERAL = 0,
-	MLX5_CAP_ETHERNET_OFFLOADS,
-	MLX5_CAP_ODP,
-	MLX5_CAP_ATOMIC,
-	MLX5_CAP_ROCE,
-	MLX5_CAP_IPOIB_OFFLOADS,
-	MLX5_CAP_EOIB_OFFLOADS,
-	MLX5_CAP_FLOW_TABLE,
-	MLX5_CAP_ESWITCH_FLOW_TABLE,
-	MLX5_CAP_ESWITCH,
-	MLX5_CAP_RESERVED,
-	MLX5_CAP_VECTOR_CALC,
-	MLX5_CAP_QOS,
-	/* NUM OF CAP Types */
-	MLX5_CAP_NUM
-};
-
-/* GET Dev Caps macros */
-#define MLX5_CAP_GEN(mdev, cap) \
-	MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
-
-#define MLX5_CAP_GEN_MAX(mdev, cap) \
-	MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
-
-#define MLX5_CAP_ETH(mdev, cap) \
-	MLX5_GET(per_protocol_networking_offload_caps,\
-		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
-
-#define MLX5_CAP_ETH_MAX(mdev, cap) \
-	MLX5_GET(per_protocol_networking_offload_caps,\
-		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
-
-#define MLX5_CAP_ROCE(mdev, cap) \
-	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
-
-#define MLX5_CAP_ROCE_MAX(mdev, cap) \
-	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
-
-#define MLX5_CAP_ATOMIC(mdev, cap) \
-	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
-
-#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
-	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
-
-#define MLX5_CAP_FLOWTABLE(mdev, cap) \
-	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
-
-#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
-	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
-
-#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
-
-#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
-
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
-#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
-
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
-#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
-	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
-	MLX5_GET(flow_table_eswitch_cap, \
-		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
-	MLX5_GET(flow_table_eswitch_cap, \
-		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
-
-#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)
-
-#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
-
-#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)
-
-#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)
-
-#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
-	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
-
-#define MLX5_CAP_ESW(mdev, cap) \
-	MLX5_GET(e_switch_cap, \
-		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
-
-#define MLX5_CAP_ESW_MAX(mdev, cap) \
-	MLX5_GET(e_switch_cap, \
-		 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
-
-#define MLX5_CAP_ODP(mdev, cap)\
-	MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
-
-#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
-	MLX5_GET(vector_calc_cap, \
-		 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
-
-#define MLX5_CAP_QOS(mdev, cap)\
-	MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
-
-enum {
-	MLX5_CMD_STAT_OK			= 0x0,
-	MLX5_CMD_STAT_INT_ERR			= 0x1,
-	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
-	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
-	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
-	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
-	MLX5_CMD_STAT_RES_BUSY			= 0x6,
-	MLX5_CMD_STAT_LIM_ERR			= 0x8,
-	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
-	MLX5_CMD_STAT_IX_ERR			= 0xa,
-	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
-	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
-	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
-	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
-	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
-	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
-};
-
-enum {
-	MLX5_IEEE_802_3_COUNTERS_GROUP	      = 0x0,
-	MLX5_RFC_2863_COUNTERS_GROUP	      = 0x1,
-	MLX5_RFC_2819_COUNTERS_GROUP	      = 0x2,
-	MLX5_RFC_3635_COUNTERS_GROUP	      = 0x3,
-	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
-	MLX5_PER_PRIORITY_COUNTERS_GROUP      = 0x10,
-	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
-	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP    = 0x12,
-	MLX5_INFINIBAND_PORT_COUNTERS_GROUP   = 0x20,
-};
-
-enum {
-	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
-	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
-static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
-{
-	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
-		return 0;
-	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
-}
-
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
-#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
-#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
-				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
-				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
-
-#endif /* MLX5_DEVICE_H */
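For reference, the MLX5_CAP_* accessors deleted above are how driver code
reads the HCA capabilities that probe parses into hca_caps_cur[] (currently
enabled values) and hca_caps_max[] (device maxima). A minimal usage sketch
(the wrapper function below is hypothetical; ft_support and log_max_ft_size
are fields of flow_table_properties_nic_receive):

	/* Hypothetical helper: check that the NIC RX flow table is
	 * supported and large enough for 1 << log_needed entries.
	 */
	static bool nic_rx_ft_usable(struct mlx5_core_dev *mdev, int log_needed)
	{
		if (!MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_support))
			return false;
		return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, log_max_ft_size) >= log_needed;
	}
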
diff --git a/include/linux/mlx5/doorbell.h b/include/linux/mlx5/doorbell.h
deleted file mode 100644
index 0787de2..0000000
--- a/include/linux/mlx5/doorbell.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_DOORBELL_H
-#define MLX5_DOORBELL_H
-
-#define MLX5_BF_OFFSET	      0x800
-#define MLX5_CQ_DOORBELL      0x20
-
-#if BITS_PER_LONG == 64
-/* Assume that we can just write a 64-bit doorbell atomically.  s390
- * actually doesn't have writeq() but S/390 systems don't even have
- * PCI so we won't worry about it.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name)
-#define MLX5_INIT_DOORBELL_LOCK(ptr)    do { } while (0)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (NULL)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-				spinlock_t *doorbell_lock)
-{
-	__raw_writeq(*(u64 *)val, dest);
-}
-
-#else
-
-/* Just fall back to a spinlock to protect the doorbell if
- * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit
- * MMIO writes.
- */
-
-#define MLX5_DECLARE_DOORBELL_LOCK(name) spinlock_t name;
-#define MLX5_INIT_DOORBELL_LOCK(ptr)     spin_lock_init(ptr)
-#define MLX5_GET_DOORBELL_LOCK(ptr)      (ptr)
-
-static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
-				spinlock_t *doorbell_lock)
-{
-	unsigned long flags;
-
-	if (doorbell_lock)
-		spin_lock_irqsave(doorbell_lock, flags);
-	__raw_writel((__force u32) val[0], dest);
-	__raw_writel((__force u32) val[1], dest + 4);
-	if (doorbell_lock)
-		spin_unlock_irqrestore(doorbell_lock, flags);
-}
-
-#endif
-
-#endif /* MLX5_DOORBELL_H */
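The helper removed above lets callers ring doorbells through one interface
on all architectures: 64-bit kernels issue a single atomic 64-bit MMIO
store and the lock macros compile away, while 32-bit kernels take a
spinlock around two 32-bit stores. A sketch of the calling pattern (the
doorbell payload and uar_map are illustrative, not the driver's actual CQ
doorbell format, and the lock is assumed to have been initialized once
with MLX5_INIT_DOORBELL_LOCK() at setup):

	MLX5_DECLARE_DOORBELL_LOCK(cq_db_lock)	/* expands to nothing on 64-bit */

	static void ring_cq_doorbell(void __iomem *uar_map, u32 arm_sn_ci, u32 cqn)
	{
		__be32 db[2];

		db[0] = cpu_to_be32(arm_sn_ci);	/* illustrative payload */
		db[1] = cpu_to_be32(cqn);
		mlx5_write64(db, uar_map + MLX5_CQ_DOORBELL,
			     MLX5_GET_DOORBELL_LOCK(&cq_db_lock));
	}
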
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
deleted file mode 100644
index 10e6325..0000000
--- a/include/linux/mlx5/driver.h
+++ /dev/null
@@ -1,1127 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_DRIVER_H
-#define MLX5_DRIVER_H
-
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/pci.h>
-#include <linux/spinlock_types.h>
-#include <linux/semaphore.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/radix-tree.h>
-#include <linux/workqueue.h>
-#include <linux/mempool.h>
-#include <linux/interrupt.h>
-
-#include <linux/mlx5/device.h>
-#include <linux/mlx5/doorbell.h>
-#include <linux/mlx5/srq.h>
-
-enum {
-	MLX5_BOARD_ID_LEN = 64,
-	MLX5_MAX_NAME_LEN = 16,
-};
-
-enum {
-	/* One minute for the sake of bringup. Generally, commands must always
-	 * complete, and we may need to increase this timeout value.
-	 */
-	MLX5_CMD_TIMEOUT_MSEC	= 60 * 1000,
-	MLX5_CMD_WQ_MAX_NAME	= 32,
-};
-
-enum {
-	CMD_OWNER_SW		= 0x0,
-	CMD_OWNER_HW		= 0x1,
-	CMD_STATUS_SUCCESS	= 0,
-};
-
-enum mlx5_sqp_t {
-	MLX5_SQP_SMI		= 0,
-	MLX5_SQP_GSI		= 1,
-	MLX5_SQP_IEEE_1588	= 2,
-	MLX5_SQP_SNIFFER	= 3,
-	MLX5_SQP_SYNC_UMR	= 4,
-};
-
-enum {
-	MLX5_MAX_PORTS	= 2,
-};
-
-enum {
-	MLX5_EQ_VEC_PAGES	 = 0,
-	MLX5_EQ_VEC_CMD		 = 1,
-	MLX5_EQ_VEC_ASYNC	 = 2,
-	MLX5_EQ_VEC_PFAULT	 = 3,
-	MLX5_EQ_VEC_COMP_BASE,
-};
-
-enum {
-	MLX5_MAX_IRQ_NAME	= 32
-};
-
-enum {
-	MLX5_ATOMIC_MODE_IB_COMP	= 1 << 16,
-	MLX5_ATOMIC_MODE_CX		= 2 << 16,
-	MLX5_ATOMIC_MODE_8B		= 3 << 16,
-	MLX5_ATOMIC_MODE_16B		= 4 << 16,
-	MLX5_ATOMIC_MODE_32B		= 5 << 16,
-	MLX5_ATOMIC_MODE_64B		= 6 << 16,
-	MLX5_ATOMIC_MODE_128B		= 7 << 16,
-	MLX5_ATOMIC_MODE_256B		= 8 << 16,
-};
-
-enum {
-	MLX5_REG_QETCR		 = 0x4005,
-	MLX5_REG_QTCT		 = 0x400a,
-	MLX5_REG_DCBX_PARAM      = 0x4020,
-	MLX5_REG_DCBX_APP        = 0x4021,
-	MLX5_REG_PCAP		 = 0x5001,
-	MLX5_REG_PMTU		 = 0x5003,
-	MLX5_REG_PTYS		 = 0x5004,
-	MLX5_REG_PAOS		 = 0x5006,
-	MLX5_REG_PFCC            = 0x5007,
-	MLX5_REG_PPCNT		 = 0x5008,
-	MLX5_REG_PMAOS		 = 0x5012,
-	MLX5_REG_PUDE		 = 0x5009,
-	MLX5_REG_PMPE		 = 0x5010,
-	MLX5_REG_PELC		 = 0x500e,
-	MLX5_REG_PVLC		 = 0x500f,
-	MLX5_REG_PCMR		 = 0x5041,
-	MLX5_REG_PMLP		 = 0x5002,
-	MLX5_REG_NODE_DESC	 = 0x6001,
-	MLX5_REG_HOST_ENDIANNESS = 0x7004,
-	MLX5_REG_MCIA		 = 0x9014,
-	MLX5_REG_MLCR		 = 0x902b,
-	MLX5_REG_MPCNT		 = 0x9051,
-};
-
-enum mlx5_dcbx_oper_mode {
-	MLX5E_DCBX_PARAM_VER_OPER_HOST  = 0x0,
-	MLX5E_DCBX_PARAM_VER_OPER_AUTO  = 0x3,
-};
-
-enum {
-	MLX5_ATOMIC_OPS_CMP_SWAP	= 1 << 0,
-	MLX5_ATOMIC_OPS_FETCH_ADD	= 1 << 1,
-};
-
-enum mlx5_page_fault_resume_flags {
-	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
-	MLX5_PAGE_FAULT_RESUME_WRITE	 = 1 << 1,
-	MLX5_PAGE_FAULT_RESUME_RDMA	 = 1 << 2,
-	MLX5_PAGE_FAULT_RESUME_ERROR	 = 1 << 7,
-};
-
-enum dbg_rsc_type {
-	MLX5_DBG_RSC_QP,
-	MLX5_DBG_RSC_EQ,
-	MLX5_DBG_RSC_CQ,
-};
-
-struct mlx5_field_desc {
-	struct dentry	       *dent;
-	int			i;
-};
-
-struct mlx5_rsc_debug {
-	struct mlx5_core_dev   *dev;
-	void		       *object;
-	enum dbg_rsc_type	type;
-	struct dentry	       *root;
-	struct mlx5_field_desc	fields[0];
-};
-
-enum mlx5_dev_event {
-	MLX5_DEV_EVENT_SYS_ERROR,
-	MLX5_DEV_EVENT_PORT_UP,
-	MLX5_DEV_EVENT_PORT_DOWN,
-	MLX5_DEV_EVENT_PORT_INITIALIZED,
-	MLX5_DEV_EVENT_LID_CHANGE,
-	MLX5_DEV_EVENT_PKEY_CHANGE,
-	MLX5_DEV_EVENT_GUID_CHANGE,
-	MLX5_DEV_EVENT_CLIENT_REREG,
-};
-
-enum mlx5_port_status {
-	MLX5_PORT_UP        = 1,
-	MLX5_PORT_DOWN      = 2,
-};
-
-enum mlx5_eq_type {
-	MLX5_EQ_TYPE_COMP,
-	MLX5_EQ_TYPE_ASYNC,
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	MLX5_EQ_TYPE_PF,
-#endif
-};
-
-struct mlx5_bfreg_info {
-	u32		       *sys_pages;
-	int			num_low_latency_bfregs;
-	unsigned int	       *count;
-
-	/*
-	 * protect bfreg allocation data structs
-	 */
-	struct mutex		lock;
-	u32			ver;
-	bool			lib_uar_4k;
-	u32			num_sys_pages;
-};
-
-struct mlx5_cmd_first {
-	__be32		data[4];
-};
-
-struct mlx5_cmd_msg {
-	struct list_head		list;
-	struct cmd_msg_cache	       *parent;
-	u32				len;
-	struct mlx5_cmd_first		first;
-	struct mlx5_cmd_mailbox	       *next;
-};
-
-struct mlx5_cmd_debug {
-	struct dentry	       *dbg_root;
-	struct dentry	       *dbg_in;
-	struct dentry	       *dbg_out;
-	struct dentry	       *dbg_outlen;
-	struct dentry	       *dbg_status;
-	struct dentry	       *dbg_run;
-	void		       *in_msg;
-	void		       *out_msg;
-	u8			status;
-	u16			inlen;
-	u16			outlen;
-};
-
-struct cmd_msg_cache {
-	/* protect block chain allocations
-	 */
-	spinlock_t		lock;
-	struct list_head	head;
-	unsigned int		max_inbox_size;
-	unsigned int		num_ent;
-};
-
-enum {
-	MLX5_NUM_COMMAND_CACHES = 5,
-};
-
-struct mlx5_cmd_stats {
-	u64		sum;
-	u64		n;
-	struct dentry  *root;
-	struct dentry  *avg;
-	struct dentry  *count;
-	/* protect command average calculations */
-	spinlock_t	lock;
-};
-
-struct mlx5_cmd {
-	void	       *cmd_alloc_buf;
-	dma_addr_t	alloc_dma;
-	int		alloc_size;
-	void	       *cmd_buf;
-	dma_addr_t	dma;
-	u16		cmdif_rev;
-	u8		log_sz;
-	u8		log_stride;
-	int		max_reg_cmds;
-	int		events;
-	u32 __iomem    *vector;
-
-	/* protect command queue allocations
-	 */
-	spinlock_t	alloc_lock;
-
-	/* protect token allocations
-	 */
-	spinlock_t	token_lock;
-	u8		token;
-	unsigned long	bitmask;
-	char		wq_name[MLX5_CMD_WQ_MAX_NAME];
-	struct workqueue_struct *wq;
-	struct semaphore sem;
-	struct semaphore pages_sem;
-	int	mode;
-	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
-	struct pci_pool *pool;
-	struct mlx5_cmd_debug dbg;
-	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
-	int checksum_disabled;
-	struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
-};
-
-struct mlx5_port_caps {
-	int	gid_table_len;
-	int	pkey_table_len;
-	u8	ext_port_cap;
-};
-
-struct mlx5_cmd_mailbox {
-	void	       *buf;
-	dma_addr_t	dma;
-	struct mlx5_cmd_mailbox *next;
-};
-
-struct mlx5_buf_list {
-	void		       *buf;
-	dma_addr_t		map;
-};
-
-struct mlx5_buf {
-	struct mlx5_buf_list	direct;
-	int			npages;
-	int			size;
-	u8			page_shift;
-};
-
-struct mlx5_frag_buf {
-	struct mlx5_buf_list	*frags;
-	int			npages;
-	int			size;
-	u8			page_shift;
-};
-
-struct mlx5_eq_tasklet {
-	struct list_head list;
-	struct list_head process_list;
-	struct tasklet_struct task;
-	/* lock on completion tasklet list */
-	spinlock_t lock;
-};
-
-struct mlx5_eq_pagefault {
-	struct work_struct       work;
-	/* Pagefaults lock */
-	spinlock_t		 lock;
-	struct workqueue_struct *wq;
-	mempool_t		*pool;
-};
-
-struct mlx5_eq {
-	struct mlx5_core_dev   *dev;
-	__be32 __iomem	       *doorbell;
-	u32			cons_index;
-	struct mlx5_buf		buf;
-	int			size;
-	unsigned int		irqn;
-	u8			eqn;
-	int			nent;
-	u64			mask;
-	struct list_head	list;
-	int			index;
-	struct mlx5_rsc_debug	*dbg;
-	enum mlx5_eq_type	type;
-	union {
-		struct mlx5_eq_tasklet   tasklet_ctx;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-		struct mlx5_eq_pagefault pf_ctx;
-#endif
-	};
-};
-
-struct mlx5_core_psv {
-	u32	psv_idx;
-	struct psv_layout {
-		u32	pd;
-		u16	syndrome;
-		u16	reserved;
-		u16	bg;
-		u16	app_tag;
-		u32	ref_tag;
-	} psv;
-};
-
-struct mlx5_core_sig_ctx {
-	struct mlx5_core_psv	psv_memory;
-	struct mlx5_core_psv	psv_wire;
-	struct ib_sig_err       err_item;
-	bool			sig_status_checked;
-	bool			sig_err_exists;
-	u32			sigerr_count;
-};
-
-enum {
-	MLX5_MKEY_MR = 1,
-	MLX5_MKEY_MW,
-};
-
-struct mlx5_core_mkey {
-	u64			iova;
-	u64			size;
-	u32			key;
-	u32			pd;
-	u32			type;
-};
-
-#define MLX5_24BIT_MASK		((1 << 24) - 1)
-
-enum mlx5_res_type {
-	MLX5_RES_QP	= MLX5_EVENT_QUEUE_TYPE_QP,
-	MLX5_RES_RQ	= MLX5_EVENT_QUEUE_TYPE_RQ,
-	MLX5_RES_SQ	= MLX5_EVENT_QUEUE_TYPE_SQ,
-	MLX5_RES_SRQ	= 3,
-	MLX5_RES_XSRQ	= 4,
-};
-
-struct mlx5_core_rsc_common {
-	enum mlx5_res_type	res;
-	atomic_t		refcount;
-	struct completion	free;
-};
-
-struct mlx5_core_srq {
-	struct mlx5_core_rsc_common	common; /* must be first */
-	u32		srqn;
-	int		max;
-	int		max_gs;
-	int		max_avail_gather;
-	int		wqe_shift;
-	void (*event)	(struct mlx5_core_srq *, enum mlx5_event);
-
-	atomic_t		refcount;
-	struct completion	free;
-};
-
-struct mlx5_eq_table {
-	void __iomem	       *update_ci;
-	void __iomem	       *update_arm_ci;
-	struct list_head	comp_eqs_list;
-	struct mlx5_eq		pages_eq;
-	struct mlx5_eq		async_eq;
-	struct mlx5_eq		cmd_eq;
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	struct mlx5_eq		pfault_eq;
-#endif
-	int			num_comp_vectors;
-	/* protect EQs list
-	 */
-	spinlock_t		lock;
-};
-
-struct mlx5_uars_page {
-	void __iomem	       *map;
-	bool			wc;
-	u32			index;
-	struct list_head	list;
-	unsigned int		bfregs;
-	unsigned long	       *reg_bitmap; /* for non fast path bf regs */
-	unsigned long	       *fp_bitmap;
-	unsigned int		reg_avail;
-	unsigned int		fp_avail;
-	struct kref		ref_count;
-	struct mlx5_core_dev   *mdev;
-};
-
-struct mlx5_bfreg_head {
-	/* protect blue flame registers allocations */
-	struct mutex		lock;
-	struct list_head	list;
-};
-
-struct mlx5_bfreg_data {
-	struct mlx5_bfreg_head	reg_head;
-	struct mlx5_bfreg_head	wc_head;
-};
-
-struct mlx5_sq_bfreg {
-	void __iomem	       *map;
-	struct mlx5_uars_page  *up;
-	bool			wc;
-	u32			index;
-	unsigned int		offset;
-};
-
-struct mlx5_core_health {
-	struct health_buffer __iomem   *health;
-	__be32 __iomem		       *health_counter;
-	struct timer_list		timer;
-	u32				prev;
-	int				miss_counter;
-	bool				sick;
-	/* wq spinlock to synchronize draining */
-	spinlock_t			wq_lock;
-	struct workqueue_struct	       *wq;
-	unsigned long			flags;
-	struct work_struct		work;
-	struct delayed_work		recover_work;
-};
-
-struct mlx5_cq_table {
-	/* protect radix tree
-	 */
-	spinlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
-struct mlx5_qp_table {
-	/* protect radix tree
-	 */
-	spinlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
-struct mlx5_srq_table {
-	/* protect radix tree
-	 */
-	spinlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
-struct mlx5_mkey_table {
-	/* protect radix tree
-	 */
-	rwlock_t		lock;
-	struct radix_tree_root	tree;
-};
-
-struct mlx5_vf_context {
-	int	enabled;
-};
-
-struct mlx5_core_sriov {
-	struct mlx5_vf_context	*vfs_ctx;
-	int			num_vfs;
-	int			enabled_vfs;
-};
-
-struct mlx5_irq_info {
-	cpumask_var_t mask;
-	char name[MLX5_MAX_IRQ_NAME];
-};
-
-struct mlx5_fc_stats {
-	struct rb_root counters;
-	struct list_head addlist;
-	/* protect addlist add/splice operations */
-	spinlock_t addlist_lock;
-
-	struct workqueue_struct *wq;
-	struct delayed_work work;
-	unsigned long next_query;
-};
-
-struct mlx5_eswitch;
-struct mlx5_lag;
-struct mlx5_pagefault;
-
-struct mlx5_rl_entry {
-	u32                     rate;
-	u16                     index;
-	u16                     refcount;
-};
-
-struct mlx5_rl_table {
-	/* protect rate limit table */
-	struct mutex            rl_lock;
-	u16                     max_size;
-	u32                     max_rate;
-	u32                     min_rate;
-	struct mlx5_rl_entry   *rl_entry;
-};
-
-enum port_module_event_status_type {
-	MLX5_MODULE_STATUS_PLUGGED   = 0x1,
-	MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
-	MLX5_MODULE_STATUS_ERROR     = 0x3,
-	MLX5_MODULE_STATUS_NUM       = 0x3,
-};
-
-enum  port_module_event_error_type {
-	MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED,
-	MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE,
-	MLX5_MODULE_EVENT_ERROR_BUS_STUCK,
-	MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT,
-	MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER,
-	MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE,
-	MLX5_MODULE_EVENT_ERROR_BAD_CABLE,
-	MLX5_MODULE_EVENT_ERROR_UNKNOWN,
-	MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_port_module_event_stats {
-	u64 status_counters[MLX5_MODULE_STATUS_NUM];
-	u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
-};
-
-struct mlx5_priv {
-	char			name[MLX5_MAX_NAME_LEN];
-	struct mlx5_eq_table	eq_table;
-	struct msix_entry	*msix_arr;
-	struct mlx5_irq_info	*irq_info;
-
-	/* pages stuff */
-	struct workqueue_struct *pg_wq;
-	struct rb_root		page_root;
-	int			fw_pages;
-	atomic_t		reg_pages;
-	struct list_head	free_list;
-	int			vfs_pages;
-
-	struct mlx5_core_health health;
-
-	struct mlx5_srq_table	srq_table;
-
-	/* start: qp stuff */
-	struct mlx5_qp_table	qp_table;
-	struct dentry	       *qp_debugfs;
-	struct dentry	       *eq_debugfs;
-	struct dentry	       *cq_debugfs;
-	struct dentry	       *cmdif_debugfs;
-	/* end: qp stuff */
-
-	/* start: cq stuff */
-	struct mlx5_cq_table	cq_table;
-	/* end: cq stuff */
-
-	/* start: mkey stuff */
-	struct mlx5_mkey_table	mkey_table;
-	/* end: mkey stuff */
-
-	/* start: alloc stuff */
-	/* protect buffer allocation according to numa node */
-	struct mutex            alloc_mutex;
-	int                     numa_node;
-
-	struct mutex            pgdir_mutex;
-	struct list_head        pgdir_list;
-	/* end: alloc stuff */
-	struct dentry	       *dbg_root;
-
-	/* protect mkey key part */
-	spinlock_t		mkey_lock;
-	u8			mkey_key;
-
-	struct list_head        dev_list;
-	struct list_head        ctx_list;
-	spinlock_t              ctx_lock;
-
-	struct mlx5_flow_steering *steering;
-	struct mlx5_eswitch     *eswitch;
-	struct mlx5_core_sriov	sriov;
-	struct mlx5_lag		*lag;
-	unsigned long		pci_dev_data;
-	struct mlx5_fc_stats		fc_stats;
-	struct mlx5_rl_table            rl_table;
-
-	struct mlx5_port_module_event_stats  pme_stats;
-
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	void		      (*pfault)(struct mlx5_core_dev *dev,
-					void *context,
-					struct mlx5_pagefault *pfault);
-	void		       *pfault_ctx;
-	struct srcu_struct      pfault_srcu;
-#endif
-	struct mlx5_bfreg_data		bfregs;
-	struct mlx5_uars_page	       *uar;
-};
-
-enum mlx5_device_state {
-	MLX5_DEVICE_STATE_UP,
-	MLX5_DEVICE_STATE_INTERNAL_ERROR,
-};
-
-enum mlx5_interface_state {
-	MLX5_INTERFACE_STATE_DOWN = BIT(0),
-	MLX5_INTERFACE_STATE_UP = BIT(1),
-	MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
-};
-
-enum mlx5_pci_status {
-	MLX5_PCI_STATUS_DISABLED,
-	MLX5_PCI_STATUS_ENABLED,
-};
-
-enum mlx5_pagefault_type_flags {
-	MLX5_PFAULT_REQUESTOR = 1 << 0,
-	MLX5_PFAULT_WRITE     = 1 << 1,
-	MLX5_PFAULT_RDMA      = 1 << 2,
-};
-
-/* Contains the details of a pagefault. */
-struct mlx5_pagefault {
-	u32			bytes_committed;
-	u32			token;
-	u8			event_subtype;
-	u8			type;
-	union {
-		/* Initiator or send message responder pagefault details. */
-		struct {
-			/* Received packet size, only valid for responders. */
-			u32	packet_size;
-			/*
-			 * Number of the resource holding the WQE; its
-			 * meaning depends on type.
-			 */
-			u32	wq_num;
-			/*
-			 * WQE index. Refers to either the send queue or
-			 * receive queue, according to event_subtype.
-			 */
-			u16	wqe_index;
-		} wqe;
-		/* RDMA responder pagefault details */
-		struct {
-			u32	r_key;
-			/*
-			 * Received packet size; the minimal page fault
-			 * resolution size required for forward progress.
-			 */
-			u32	packet_size;
-			u32	rdma_op_len;
-			u64	rdma_va;
-		} rdma;
-	};
-
-	struct mlx5_eq	       *eq;
-	struct work_struct	work;
-};
-
-struct mlx5_td {
-	struct list_head tirs_list;
-	u32              tdn;
-};
-
-struct mlx5e_resources {
-	u32                        pdn;
-	struct mlx5_td             td;
-	struct mlx5_core_mkey      mkey;
-};
-
-struct mlx5_core_dev {
-	struct pci_dev	       *pdev;
-	/* sync pci state */
-	struct mutex		pci_status_mutex;
-	enum mlx5_pci_status	pci_status;
-	u8			rev_id;
-	char			board_id[MLX5_BOARD_ID_LEN];
-	struct mlx5_cmd		cmd;
-	struct mlx5_port_caps	port_caps[MLX5_MAX_PORTS];
-	u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
-	u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
-	phys_addr_t		iseg_base;
-	struct mlx5_init_seg __iomem *iseg;
-	enum mlx5_device_state	state;
-	/* sync interface state */
-	struct mutex		intf_state_mutex;
-	unsigned long		intf_state;
-	void			(*event) (struct mlx5_core_dev *dev,
-					  enum mlx5_dev_event event,
-					  unsigned long param);
-	struct mlx5_priv	priv;
-	struct mlx5_profile	*profile;
-	atomic_t		num_qps;
-	u32			issi;
-	struct mlx5e_resources  mlx5e_res;
-#ifdef CONFIG_RFS_ACCEL
-	struct cpu_rmap         *rmap;
-#endif
-};
-
-struct mlx5_db {
-	__be32			*db;
-	union {
-		struct mlx5_db_pgdir		*pgdir;
-		struct mlx5_ib_user_db_page	*user_page;
-	}			u;
-	dma_addr_t		dma;
-	int			index;
-};
-
-enum {
-	MLX5_COMP_EQ_SIZE = 1024,
-};
-
-enum {
-	MLX5_PTYS_IB = 1 << 0,
-	MLX5_PTYS_EN = 1 << 2,
-};
-
-typedef void (*mlx5_cmd_cbk_t)(int status, void *context);
-
-struct mlx5_cmd_work_ent {
-	struct mlx5_cmd_msg    *in;
-	struct mlx5_cmd_msg    *out;
-	void		       *uout;
-	int			uout_size;
-	mlx5_cmd_cbk_t		callback;
-	struct delayed_work	cb_timeout_work;
-	void		       *context;
-	int			idx;
-	struct completion	done;
-	struct mlx5_cmd        *cmd;
-	struct work_struct	work;
-	struct mlx5_cmd_layout *lay;
-	int			ret;
-	int			page_queue;
-	u8			status;
-	u8			token;
-	u64			ts1;
-	u64			ts2;
-	u16			op;
-};
-
-struct mlx5_pas {
-	u64	pa;
-	u8	log_sz;
-};
-
-enum port_state_policy {
-	MLX5_POLICY_DOWN	= 0,
-	MLX5_POLICY_UP		= 1,
-	MLX5_POLICY_FOLLOW	= 2,
-	MLX5_POLICY_INVALID	= 0xffffffff
-};
-
-enum phy_port_state {
-	MLX5_AAA_111
-};
-
-struct mlx5_hca_vport_context {
-	u32			field_select;
-	bool			sm_virt_aware;
-	bool			has_smi;
-	bool			has_raw;
-	enum port_state_policy	policy;
-	enum phy_port_state	phys_state;
-	enum ib_port_state	vport_state;
-	u8			port_physical_state;
-	u64			sys_image_guid;
-	u64			port_guid;
-	u64			node_guid;
-	u32			cap_mask1;
-	u32			cap_mask1_perm;
-	u32			cap_mask2;
-	u32			cap_mask2_perm;
-	u16			lid;
-	u8			init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
-	u8			lmc;
-	u8			subnet_timeout;
-	u16			sm_lid;
-	u8			sm_sl;
-	u16			qkey_violation_counter;
-	u16			pkey_violation_counter;
-	bool			grh_required;
-};
-
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
-{
-	return buf->direct.buf + offset;
-}
-
-extern struct workqueue_struct *mlx5_core_wq;
-
-#define STRUCT_FIELD(header, field) \
-	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field),      \
-	.struct_size_bytes   = sizeof((struct ib_unpacked_ ## header *)0)->field
-
-static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
-{
-	return pci_get_drvdata(pdev);
-}
-
-extern struct dentry *mlx5_debugfs_root;
-
-static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
-}
-
-static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->fw_rev) >> 16;
-}
-
-static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
-}
-
-static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
-}
-
-static inline void *mlx5_vzalloc(unsigned long size)
-{
-	void *rtn;
-
-	rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-	if (!rtn)
-		rtn = vzalloc(size);
-	return rtn;
-}
-
-static inline u32 mlx5_base_mkey(const u32 key)
-{
-	return key & 0xffffff00u;
-}
-
-int mlx5_cmd_init(struct mlx5_core_dev *dev);
-void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
-
-int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
-		  int out_size);
-int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
-		     void *out, int out_size, mlx5_cmd_cbk_t callback,
-		     void *context);
-void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
-void mlx5_health_cleanup(struct mlx5_core_dev *dev);
-int mlx5_health_init(struct mlx5_core_dev *dev);
-void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
-void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
-int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-			     struct mlx5_frag_buf *buf, int node);
-void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
-						      gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
-				 struct mlx5_cmd_mailbox *head);
-int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			 struct mlx5_srq_attr *in);
-int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq);
-int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-			struct mlx5_srq_attr *out);
-int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
-		      u16 lwm, int is_srq);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
-int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
-			     struct mlx5_core_mkey *mkey,
-			     u32 *in, int inlen,
-			     u32 *out, int outlen,
-			     mlx5_cmd_cbk_t callback, void *context);
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
-			  struct mlx5_core_mkey *mkey,
-			  u32 *in, int inlen);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
-			   struct mlx5_core_mkey *mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
-			 u32 *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
-			     u32 *mkey);
-int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
-int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
-int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
-		      u16 opmod, u8 port);
-void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
-void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
-int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
-void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
-				 s32 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
-int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
-void mlx5_register_debugfs(void);
-void mlx5_unregister_debugfs(void);
-int mlx5_eq_init(struct mlx5_core_dev *dev);
-void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
-void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
-void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
-void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
-struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
-void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec);
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type);
-int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
-		       int nent, u64 mask, const char *name,
-		       enum mlx5_eq_type type);
-int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_start_eqs(struct mlx5_core_dev *dev);
-int mlx5_stop_eqs(struct mlx5_core_dev *dev);
-int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
-		    unsigned int *irqn);
-int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
-int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
-
-int mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
-			 int size_in, void *data_out, int size_out,
-			 u16 reg_num, int arg, int write);
-
-int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
-int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
-		       u32 *out, int outlen);
-int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
-int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
-		       int node);
-void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-
-const char *mlx5_command_str(int command);
-int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
-void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
-int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
-			 int npsvs, u32 *sig_index);
-int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
-void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
-			struct mlx5_odp_caps *odp_caps);
-int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
-			     u8 port_num, void *out, size_t sz);
-#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token,
-				u32 wq_num, u8 type, int error);
-#endif
-
-int mlx5_init_rl_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
-int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index);
-void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate);
-bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
-int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
-		     bool map_wc, bool fast_path);
-void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);
-
-static inline int fw_initializing(struct mlx5_core_dev *dev)
-{
-	return ioread32be(&dev->iseg->initializing) >> 31;
-}
-
-static inline u32 mlx5_mkey_to_idx(u32 mkey)
-{
-	return mkey >> 8;
-}
-
-static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
-{
-	return mkey_idx << 8;
-}
-
-static inline u8 mlx5_mkey_variant(u32 mkey)
-{
-	return mkey & 0xff;
-}
-
-enum {
-	MLX5_PROF_MASK_QP_SIZE		= (u64)1 << 0,
-	MLX5_PROF_MASK_MR_CACHE		= (u64)1 << 1,
-};
-
-enum {
-	MAX_MR_CACHE_ENTRIES    = 21,
-};
-
-enum {
-	MLX5_INTERFACE_PROTOCOL_IB  = 0,
-	MLX5_INTERFACE_PROTOCOL_ETH = 1,
-};
-
-struct mlx5_interface {
-	void *			(*add)(struct mlx5_core_dev *dev);
-	void			(*remove)(struct mlx5_core_dev *dev, void *context);
-	int			(*attach)(struct mlx5_core_dev *dev, void *context);
-	void			(*detach)(struct mlx5_core_dev *dev, void *context);
-	void			(*event)(struct mlx5_core_dev *dev, void *context,
-					 enum mlx5_dev_event event, unsigned long param);
-	void			(*pfault)(struct mlx5_core_dev *dev,
-					  void *context,
-					  struct mlx5_pagefault *pfault);
-	void *                  (*get_dev)(void *context);
-	int			protocol;
-	struct list_head	list;
-};
-
-void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
-int mlx5_register_interface(struct mlx5_interface *intf);
-void mlx5_unregister_interface(struct mlx5_interface *intf);
-int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
-
-int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
-int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
-bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
-struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
-struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
-void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
-
-struct mlx5_profile {
-	u64	mask;
-	u8	log_max_qp;
-	struct {
-		int	size;
-		int	limit;
-	} mr_cache[MAX_MR_CACHE_ENTRIES];
-};
-
-enum {
-	MLX5_PCI_DEV_IS_VF		= 1 << 0,
-};
-
-static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev)
-{
-	return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF);
-}
-
-static inline int mlx5_get_gid_table_len(u16 param)
-{
-	if (param > 4) {
-		pr_warn("gid table length is zero\n");
-		return 0;
-	}
-
-	return 8 * (1 << param);
-}
-
-static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
-{
-	return !!(dev->priv.rl_table.max_size);
-}
-
-enum {
-	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
-};
-
-#endif /* MLX5_DRIVER_H */
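Most of the prototypes above funnel through mlx5_cmd_exec(): callers build
an inbox described by the mlx5_ifc layouts, execute the command, then pull
results out of the outbox. A minimal sketch of that calling convention,
assuming the usual MLX5_ST_SZ_DW()/MLX5_SET()/MLX5_GET() helpers (the
wrapper name is hypothetical; the driver's own mlx5_core_alloc_pd()
follows the same pattern):

	static int example_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn)
	{
		u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {0};
		u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {0};
		int err;

		MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
		err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
		if (!err)
			*pdn = MLX5_GET(alloc_pd_out, out, pd);
		return err;
	}
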
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
deleted file mode 100644
index 949b24b..0000000
--- a/include/linux/mlx5/fs.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MLX5_FS_
-#define _MLX5_FS_
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-
-#define MLX5_FS_DEFAULT_FLOW_TAG 0x0
-
-enum {
-	MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO	= 1 << 16,
-};
-
-enum {
-	MLX5_FLOW_TABLE_TUNNEL_EN = BIT(0),
-};
-
-#define LEFTOVERS_RULE_NUM	 2
-static inline void build_leftovers_ft_param(int *priority,
-					    int *n_ent,
-					    int *n_grp)
-{
-	*priority = 0; /* Priority of leftovers_prio-0 */
-	*n_ent = LEFTOVERS_RULE_NUM;
-	*n_grp = LEFTOVERS_RULE_NUM;
-}
-
-enum mlx5_flow_namespace_type {
-	MLX5_FLOW_NAMESPACE_BYPASS,
-	MLX5_FLOW_NAMESPACE_LAG,
-	MLX5_FLOW_NAMESPACE_OFFLOADS,
-	MLX5_FLOW_NAMESPACE_ETHTOOL,
-	MLX5_FLOW_NAMESPACE_KERNEL,
-	MLX5_FLOW_NAMESPACE_LEFTOVERS,
-	MLX5_FLOW_NAMESPACE_ANCHOR,
-	MLX5_FLOW_NAMESPACE_FDB,
-	MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-	MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-	MLX5_FLOW_NAMESPACE_SNIFFER_RX,
-	MLX5_FLOW_NAMESPACE_SNIFFER_TX,
-};
-
-struct mlx5_flow_table;
-struct mlx5_flow_group;
-struct mlx5_flow_namespace;
-struct mlx5_flow_handle;
-
-struct mlx5_flow_spec {
-	u8   match_criteria_enable;
-	u32  match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
-	u32  match_value[MLX5_ST_SZ_DW(fte_match_param)];
-};
-
-struct mlx5_flow_destination {
-	enum mlx5_flow_destination_type	type;
-	union {
-		u32			tir_num;
-		struct mlx5_flow_table	*ft;
-		u32			vport_num;
-		struct mlx5_fc		*counter;
-	};
-};
-
-struct mlx5_flow_namespace *
-mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
-			enum mlx5_flow_namespace_type type);
-
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
-				    int prio,
-				    int num_flow_table_entries,
-				    int max_num_groups,
-				    u32 level,
-				    u32 flags);
-
-struct mlx5_flow_table *
-mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
-		       int prio,
-		       int num_flow_table_entries,
-		       u32 level,
-		       u32 flags);
-struct mlx5_flow_table *
-mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
-			     int prio,
-			     int num_flow_table_entries,
-			     u32 level, u16 vport);
-struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
-					       struct mlx5_flow_namespace *ns,
-					       int prio, u32 level);
-int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
-
-/* inbox should be set with the following values:
- * start_flow_index
- * end_flow_index
- * match_criteria_enable
- * match_criteria
- */
-struct mlx5_flow_group *
-mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
-void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
-
-struct mlx5_flow_act {
-	u32 action;
-	u32 flow_tag;
-	u32 encap_id;
-};
-
-/* Single destination per rule.
- * Group ID is implied by the match criteria.
- */
-struct mlx5_flow_handle *
-mlx5_add_flow_rules(struct mlx5_flow_table *ft,
-		    struct mlx5_flow_spec *spec,
-		    struct mlx5_flow_act *flow_act,
-		    struct mlx5_flow_destination *dest,
-		    int dest_num);
-void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
-
-int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
-				 struct mlx5_flow_destination *new_dest,
-				 struct mlx5_flow_destination *old_dest);
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
-void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
-void mlx5_fc_query_cached(struct mlx5_fc *counter,
-			  u64 *bytes, u64 *packets, u64 *lastuse);
-
-#endif
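Taken together, the flow steering API above is used in three steps: look
up a namespace, create (or auto-group) a table in it, then add rules with
a match spec, an action, and destinations. A hedged sketch of adding a
catch-all forwarding rule to an existing table (the destination type and
action constants are assumed to come from mlx5_ifc.h; the function name
is illustrative):

	static struct mlx5_flow_handle *
	example_fwd_all_to_tir(struct mlx5_flow_table *ft, u32 tir_num)
	{
		struct mlx5_flow_destination dest = {
			.type	 = MLX5_FLOW_DESTINATION_TYPE_TIR,
			.tir_num = tir_num,
		};
		struct mlx5_flow_act flow_act = {
			.action	  = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
			.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		};
		struct mlx5_flow_spec *spec;
		struct mlx5_flow_handle *rule;

		/* A zeroed spec matches everything within the rule's group. */
		spec = mlx5_vzalloc(sizeof(*spec));
		if (!spec)
			return ERR_PTR(-ENOMEM);
		rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
		kvfree(spec);
		return rule;
	}
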
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
deleted file mode 100644
index 1223fef..0000000
--- a/include/linux/mlx5/mlx5_ifc.h
+++ /dev/null
@@ -1,8287 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
-*/
-#ifndef MLX5_IFC_H
-#define MLX5_IFC_H
-
-enum {
-	MLX5_EVENT_TYPE_CODING_COMPLETION_EVENTS                   = 0x0,
-	MLX5_EVENT_TYPE_CODING_PATH_MIGRATED_SUCCEEDED             = 0x1,
-	MLX5_EVENT_TYPE_CODING_COMMUNICATION_ESTABLISHED           = 0x2,
-	MLX5_EVENT_TYPE_CODING_SEND_QUEUE_DRAINED                  = 0x3,
-	MLX5_EVENT_TYPE_CODING_LAST_WQE_REACHED                    = 0x13,
-	MLX5_EVENT_TYPE_CODING_SRQ_LIMIT                           = 0x14,
-	MLX5_EVENT_TYPE_CODING_DCT_ALL_CONNECTIONS_CLOSED          = 0x1c,
-	MLX5_EVENT_TYPE_CODING_DCT_ACCESS_KEY_VIOLATION            = 0x1d,
-	MLX5_EVENT_TYPE_CODING_CQ_ERROR                            = 0x4,
-	MLX5_EVENT_TYPE_CODING_LOCAL_WQ_CATASTROPHIC_ERROR         = 0x5,
-	MLX5_EVENT_TYPE_CODING_PATH_MIGRATION_FAILED               = 0x7,
-	MLX5_EVENT_TYPE_CODING_PAGE_FAULT_EVENT                    = 0xc,
-	MLX5_EVENT_TYPE_CODING_INVALID_REQUEST_LOCAL_WQ_ERROR      = 0x10,
-	MLX5_EVENT_TYPE_CODING_LOCAL_ACCESS_VIOLATION_WQ_ERROR     = 0x11,
-	MLX5_EVENT_TYPE_CODING_LOCAL_SRQ_CATASTROPHIC_ERROR        = 0x12,
-	MLX5_EVENT_TYPE_CODING_INTERNAL_ERROR                      = 0x8,
-	MLX5_EVENT_TYPE_CODING_PORT_STATE_CHANGE                   = 0x9,
-	MLX5_EVENT_TYPE_CODING_GPIO_EVENT                          = 0x15,
-	MLX5_EVENT_TYPE_CODING_REMOTE_CONFIGURATION_PROTOCOL_EVENT = 0x19,
-	MLX5_EVENT_TYPE_CODING_DOORBELL_BLUEFLAME_CONGESTION_EVENT = 0x1a,
-	MLX5_EVENT_TYPE_CODING_STALL_VL_EVENT                      = 0x1b,
-	MLX5_EVENT_TYPE_CODING_DROPPED_PACKET_LOGGED_EVENT         = 0x1f,
-	MLX5_EVENT_TYPE_CODING_COMMAND_INTERFACE_COMPLETION        = 0xa,
-	MLX5_EVENT_TYPE_CODING_PAGE_REQUEST                        = 0xb
-};
-
-enum {
-	MLX5_MODIFY_TIR_BITMASK_LRO                   = 0x0,
-	MLX5_MODIFY_TIR_BITMASK_INDIRECT_TABLE        = 0x1,
-	MLX5_MODIFY_TIR_BITMASK_HASH                  = 0x2,
-	MLX5_MODIFY_TIR_BITMASK_TUNNELED_OFFLOAD_EN   = 0x3
-};
-
-enum {
-	MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE        = 0x0,
-	MLX5_SET_HCA_CAP_OP_MOD_ATOMIC                = 0x3,
-};
-
-enum {
-	MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
-	MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
-	MLX5_CMD_OP_INIT_HCA                      = 0x102,
-	MLX5_CMD_OP_TEARDOWN_HCA                  = 0x103,
-	MLX5_CMD_OP_ENABLE_HCA                    = 0x104,
-	MLX5_CMD_OP_DISABLE_HCA                   = 0x105,
-	MLX5_CMD_OP_QUERY_PAGES                   = 0x107,
-	MLX5_CMD_OP_MANAGE_PAGES                  = 0x108,
-	MLX5_CMD_OP_SET_HCA_CAP                   = 0x109,
-	MLX5_CMD_OP_QUERY_ISSI                    = 0x10a,
-	MLX5_CMD_OP_SET_ISSI                      = 0x10b,
-	MLX5_CMD_OP_SET_DRIVER_VERSION            = 0x10d,
-	MLX5_CMD_OP_CREATE_MKEY                   = 0x200,
-	MLX5_CMD_OP_QUERY_MKEY                    = 0x201,
-	MLX5_CMD_OP_DESTROY_MKEY                  = 0x202,
-	MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS        = 0x203,
-	MLX5_CMD_OP_PAGE_FAULT_RESUME             = 0x204,
-	MLX5_CMD_OP_CREATE_EQ                     = 0x301,
-	MLX5_CMD_OP_DESTROY_EQ                    = 0x302,
-	MLX5_CMD_OP_QUERY_EQ                      = 0x303,
-	MLX5_CMD_OP_GEN_EQE                       = 0x304,
-	MLX5_CMD_OP_CREATE_CQ                     = 0x400,
-	MLX5_CMD_OP_DESTROY_CQ                    = 0x401,
-	MLX5_CMD_OP_QUERY_CQ                      = 0x402,
-	MLX5_CMD_OP_MODIFY_CQ                     = 0x403,
-	MLX5_CMD_OP_CREATE_QP                     = 0x500,
-	MLX5_CMD_OP_DESTROY_QP                    = 0x501,
-	MLX5_CMD_OP_RST2INIT_QP                   = 0x502,
-	MLX5_CMD_OP_INIT2RTR_QP                   = 0x503,
-	MLX5_CMD_OP_RTR2RTS_QP                    = 0x504,
-	MLX5_CMD_OP_RTS2RTS_QP                    = 0x505,
-	MLX5_CMD_OP_SQERR2RTS_QP                  = 0x506,
-	MLX5_CMD_OP_2ERR_QP                       = 0x507,
-	MLX5_CMD_OP_2RST_QP                       = 0x50a,
-	MLX5_CMD_OP_QUERY_QP                      = 0x50b,
-	MLX5_CMD_OP_SQD_RTS_QP                    = 0x50c,
-	MLX5_CMD_OP_INIT2INIT_QP                  = 0x50e,
-	MLX5_CMD_OP_CREATE_PSV                    = 0x600,
-	MLX5_CMD_OP_DESTROY_PSV                   = 0x601,
-	MLX5_CMD_OP_CREATE_SRQ                    = 0x700,
-	MLX5_CMD_OP_DESTROY_SRQ                   = 0x701,
-	MLX5_CMD_OP_QUERY_SRQ                     = 0x702,
-	MLX5_CMD_OP_ARM_RQ                        = 0x703,
-	MLX5_CMD_OP_CREATE_XRC_SRQ                = 0x705,
-	MLX5_CMD_OP_DESTROY_XRC_SRQ               = 0x706,
-	MLX5_CMD_OP_QUERY_XRC_SRQ                 = 0x707,
-	MLX5_CMD_OP_ARM_XRC_SRQ                   = 0x708,
-	MLX5_CMD_OP_CREATE_DCT                    = 0x710,
-	MLX5_CMD_OP_DESTROY_DCT                   = 0x711,
-	MLX5_CMD_OP_DRAIN_DCT                     = 0x712,
-	MLX5_CMD_OP_QUERY_DCT                     = 0x713,
-	MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION     = 0x714,
-	MLX5_CMD_OP_CREATE_XRQ                    = 0x717,
-	MLX5_CMD_OP_DESTROY_XRQ                   = 0x718,
-	MLX5_CMD_OP_QUERY_XRQ                     = 0x719,
-	MLX5_CMD_OP_ARM_XRQ                       = 0x71a,
-	MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
-	MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
-	MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
-	MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT      = 0x753,
-	MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT       = 0x754,
-	MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT      = 0x755,
-	MLX5_CMD_OP_QUERY_ROCE_ADDRESS            = 0x760,
-	MLX5_CMD_OP_SET_ROCE_ADDRESS              = 0x761,
-	MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT       = 0x762,
-	MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT      = 0x763,
-	MLX5_CMD_OP_QUERY_HCA_VPORT_GID           = 0x764,
-	MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY          = 0x765,
-	MLX5_CMD_OP_QUERY_VPORT_COUNTER           = 0x770,
-	MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
-	MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
-	MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
-	MLX5_CMD_OP_SET_RATE_LIMIT                = 0x780,
-	MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
-	MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
-	MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT     = 0x783,
-	MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT       = 0x784,
-	MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT      = 0x785,
-	MLX5_CMD_OP_CREATE_QOS_PARA_VPORT         = 0x786,
-	MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT        = 0x787,
-	MLX5_CMD_OP_ALLOC_PD                      = 0x800,
-	MLX5_CMD_OP_DEALLOC_PD                    = 0x801,
-	MLX5_CMD_OP_ALLOC_UAR                     = 0x802,
-	MLX5_CMD_OP_DEALLOC_UAR                   = 0x803,
-	MLX5_CMD_OP_CONFIG_INT_MODERATION         = 0x804,
-	MLX5_CMD_OP_ACCESS_REG                    = 0x805,
-	MLX5_CMD_OP_ATTACH_TO_MCG                 = 0x806,
-	MLX5_CMD_OP_DETACH_FROM_MCG               = 0x807,
-	MLX5_CMD_OP_GET_DROPPED_PACKET_LOG        = 0x80a,
-	MLX5_CMD_OP_MAD_IFC                       = 0x50d,
-	MLX5_CMD_OP_QUERY_MAD_DEMUX               = 0x80b,
-	MLX5_CMD_OP_SET_MAD_DEMUX                 = 0x80c,
-	MLX5_CMD_OP_NOP                           = 0x80d,
-	MLX5_CMD_OP_ALLOC_XRCD                    = 0x80e,
-	MLX5_CMD_OP_DEALLOC_XRCD                  = 0x80f,
-	MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN        = 0x816,
-	MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN      = 0x817,
-	MLX5_CMD_OP_QUERY_CONG_STATUS             = 0x822,
-	MLX5_CMD_OP_MODIFY_CONG_STATUS            = 0x823,
-	MLX5_CMD_OP_QUERY_CONG_PARAMS             = 0x824,
-	MLX5_CMD_OP_MODIFY_CONG_PARAMS            = 0x825,
-	MLX5_CMD_OP_QUERY_CONG_STATISTICS         = 0x826,
-	MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT           = 0x827,
-	MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT        = 0x828,
-	MLX5_CMD_OP_SET_L2_TABLE_ENTRY            = 0x829,
-	MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY          = 0x82a,
-	MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY         = 0x82b,
-	MLX5_CMD_OP_SET_WOL_ROL                   = 0x830,
-	MLX5_CMD_OP_QUERY_WOL_ROL                 = 0x831,
-	MLX5_CMD_OP_CREATE_LAG                    = 0x840,
-	MLX5_CMD_OP_MODIFY_LAG                    = 0x841,
-	MLX5_CMD_OP_QUERY_LAG                     = 0x842,
-	MLX5_CMD_OP_DESTROY_LAG                   = 0x843,
-	MLX5_CMD_OP_CREATE_VPORT_LAG              = 0x844,
-	MLX5_CMD_OP_DESTROY_VPORT_LAG             = 0x845,
-	MLX5_CMD_OP_CREATE_TIR                    = 0x900,
-	MLX5_CMD_OP_MODIFY_TIR                    = 0x901,
-	MLX5_CMD_OP_DESTROY_TIR                   = 0x902,
-	MLX5_CMD_OP_QUERY_TIR                     = 0x903,
-	MLX5_CMD_OP_CREATE_SQ                     = 0x904,
-	MLX5_CMD_OP_MODIFY_SQ                     = 0x905,
-	MLX5_CMD_OP_DESTROY_SQ                    = 0x906,
-	MLX5_CMD_OP_QUERY_SQ                      = 0x907,
-	MLX5_CMD_OP_CREATE_RQ                     = 0x908,
-	MLX5_CMD_OP_MODIFY_RQ                     = 0x909,
-	MLX5_CMD_OP_DESTROY_RQ                    = 0x90a,
-	MLX5_CMD_OP_QUERY_RQ                      = 0x90b,
-	MLX5_CMD_OP_CREATE_RMP                    = 0x90c,
-	MLX5_CMD_OP_MODIFY_RMP                    = 0x90d,
-	MLX5_CMD_OP_DESTROY_RMP                   = 0x90e,
-	MLX5_CMD_OP_QUERY_RMP                     = 0x90f,
-	MLX5_CMD_OP_CREATE_TIS                    = 0x912,
-	MLX5_CMD_OP_MODIFY_TIS                    = 0x913,
-	MLX5_CMD_OP_DESTROY_TIS                   = 0x914,
-	MLX5_CMD_OP_QUERY_TIS                     = 0x915,
-	MLX5_CMD_OP_CREATE_RQT                    = 0x916,
-	MLX5_CMD_OP_MODIFY_RQT                    = 0x917,
-	MLX5_CMD_OP_DESTROY_RQT                   = 0x918,
-	MLX5_CMD_OP_QUERY_RQT                     = 0x919,
-	MLX5_CMD_OP_SET_FLOW_TABLE_ROOT		  = 0x92f,
-	MLX5_CMD_OP_CREATE_FLOW_TABLE             = 0x930,
-	MLX5_CMD_OP_DESTROY_FLOW_TABLE            = 0x931,
-	MLX5_CMD_OP_QUERY_FLOW_TABLE              = 0x932,
-	MLX5_CMD_OP_CREATE_FLOW_GROUP             = 0x933,
-	MLX5_CMD_OP_DESTROY_FLOW_GROUP            = 0x934,
-	MLX5_CMD_OP_QUERY_FLOW_GROUP              = 0x935,
-	MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY          = 0x936,
-	MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY        = 0x937,
-	MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY       = 0x938,
-	MLX5_CMD_OP_ALLOC_FLOW_COUNTER            = 0x939,
-	MLX5_CMD_OP_DEALLOC_FLOW_COUNTER          = 0x93a,
-	MLX5_CMD_OP_QUERY_FLOW_COUNTER            = 0x93b,
-	MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
-	MLX5_CMD_OP_ALLOC_ENCAP_HEADER            = 0x93d,
-	MLX5_CMD_OP_DEALLOC_ENCAP_HEADER          = 0x93e,
-	MLX5_CMD_OP_MAX
-};
-
-struct mlx5_ifc_flow_table_fields_supported_bits {
-	u8         outer_dmac[0x1];
-	u8         outer_smac[0x1];
-	u8         outer_ether_type[0x1];
-	u8         reserved_at_3[0x1];
-	u8         outer_first_prio[0x1];
-	u8         outer_first_cfi[0x1];
-	u8         outer_first_vid[0x1];
-	u8         reserved_at_7[0x1];
-	u8         outer_second_prio[0x1];
-	u8         outer_second_cfi[0x1];
-	u8         outer_second_vid[0x1];
-	u8         reserved_at_b[0x1];
-	u8         outer_sip[0x1];
-	u8         outer_dip[0x1];
-	u8         outer_frag[0x1];
-	u8         outer_ip_protocol[0x1];
-	u8         outer_ip_ecn[0x1];
-	u8         outer_ip_dscp[0x1];
-	u8         outer_udp_sport[0x1];
-	u8         outer_udp_dport[0x1];
-	u8         outer_tcp_sport[0x1];
-	u8         outer_tcp_dport[0x1];
-	u8         outer_tcp_flags[0x1];
-	u8         outer_gre_protocol[0x1];
-	u8         outer_gre_key[0x1];
-	u8         outer_vxlan_vni[0x1];
-	u8         reserved_at_1a[0x5];
-	u8         source_eswitch_port[0x1];
-
-	u8         inner_dmac[0x1];
-	u8         inner_smac[0x1];
-	u8         inner_ether_type[0x1];
-	u8         reserved_at_23[0x1];
-	u8         inner_first_prio[0x1];
-	u8         inner_first_cfi[0x1];
-	u8         inner_first_vid[0x1];
-	u8         reserved_at_27[0x1];
-	u8         inner_second_prio[0x1];
-	u8         inner_second_cfi[0x1];
-	u8         inner_second_vid[0x1];
-	u8         reserved_at_2b[0x1];
-	u8         inner_sip[0x1];
-	u8         inner_dip[0x1];
-	u8         inner_frag[0x1];
-	u8         inner_ip_protocol[0x1];
-	u8         inner_ip_ecn[0x1];
-	u8         inner_ip_dscp[0x1];
-	u8         inner_udp_sport[0x1];
-	u8         inner_udp_dport[0x1];
-	u8         inner_tcp_sport[0x1];
-	u8         inner_tcp_dport[0x1];
-	u8         inner_tcp_flags[0x1];
-	u8         reserved_at_37[0x9];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_flow_table_prop_layout_bits {
-	u8         ft_support[0x1];
-	u8         reserved_at_1[0x1];
-	u8         flow_counter[0x1];
-	u8	   flow_modify_en[0x1];
-	u8         modify_root[0x1];
-	u8         identified_miss_table_mode[0x1];
-	u8         flow_table_modify[0x1];
-	u8         encap[0x1];
-	u8         decap[0x1];
-	u8         reserved_at_9[0x17];
-
-	u8         reserved_at_20[0x2];
-	u8         log_max_ft_size[0x6];
-	u8         reserved_at_28[0x10];
-	u8         max_ft_level[0x8];
-
-	u8         reserved_at_40[0x20];
-
-	u8         reserved_at_60[0x18];
-	u8         log_max_ft_num[0x8];
-
-	u8         reserved_at_80[0x18];
-	u8         log_max_destination[0x8];
-
-	u8         reserved_at_a0[0x18];
-	u8         log_max_flow[0x8];
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
-
-	struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
-};
-
-struct mlx5_ifc_odp_per_transport_service_cap_bits {
-	u8         send[0x1];
-	u8         receive[0x1];
-	u8         write[0x1];
-	u8         read[0x1];
-	u8         atomic[0x1];
-	u8         srq_receive[0x1];
-	u8         reserved_at_6[0x1a];
-};
-
-struct mlx5_ifc_ipv4_layout_bits {
-	u8         reserved_at_0[0x60];
-
-	u8         ipv4[0x20];
-};
-
-struct mlx5_ifc_ipv6_layout_bits {
-	u8         ipv6[16][0x8];
-};
-
-union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
-	struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
-	struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
-	u8         reserved_at_0[0x80];
-};
-
-struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
-	u8         smac_47_16[0x20];
-
-	u8         smac_15_0[0x10];
-	u8         ethertype[0x10];
-
-	u8         dmac_47_16[0x20];
-
-	u8         dmac_15_0[0x10];
-	u8         first_prio[0x3];
-	u8         first_cfi[0x1];
-	u8         first_vid[0xc];
-
-	u8         ip_protocol[0x8];
-	u8         ip_dscp[0x6];
-	u8         ip_ecn[0x2];
-	u8         vlan_tag[0x1];
-	u8         reserved_at_91[0x1];
-	u8         frag[0x1];
-	u8         reserved_at_93[0x4];
-	u8         tcp_flags[0x9];
-
-	u8         tcp_sport[0x10];
-	u8         tcp_dport[0x10];
-
-	u8         reserved_at_c0[0x20];
-
-	u8         udp_sport[0x10];
-	u8         udp_dport[0x10];
-
-	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits src_ipv4_src_ipv6;
-
-	union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
-};
-
-struct mlx5_ifc_fte_match_set_misc_bits {
-	u8         reserved_at_0[0x8];
-	u8         source_sqn[0x18];
-
-	u8         reserved_at_20[0x10];
-	u8         source_port[0x10];
-
-	u8         outer_second_prio[0x3];
-	u8         outer_second_cfi[0x1];
-	u8         outer_second_vid[0xc];
-	u8         inner_second_prio[0x3];
-	u8         inner_second_cfi[0x1];
-	u8         inner_second_vid[0xc];
-
-	u8         outer_second_vlan_tag[0x1];
-	u8         inner_second_vlan_tag[0x1];
-	u8         reserved_at_62[0xe];
-	u8         gre_protocol[0x10];
-
-	u8         gre_key_h[0x18];
-	u8         gre_key_l[0x8];
-
-	u8         vxlan_vni[0x18];
-	u8         reserved_at_b8[0x8];
-
-	u8         reserved_at_c0[0x20];
-
-	u8         reserved_at_e0[0xc];
-	u8         outer_ipv6_flow_label[0x14];
-
-	u8         reserved_at_100[0xc];
-	u8         inner_ipv6_flow_label[0x14];
-
-	u8         reserved_at_120[0xe0];
-};
-
-struct mlx5_ifc_cmd_pas_bits {
-	u8         pa_h[0x20];
-
-	u8         pa_l[0x14];
-	u8         reserved_at_34[0xc];
-};
-
-struct mlx5_ifc_uint64_bits {
-	u8         hi[0x20];
-
-	u8         lo[0x20];
-};
-
-enum {
-	MLX5_ADS_STAT_RATE_NO_LIMIT  = 0x0,
-	MLX5_ADS_STAT_RATE_2_5GBPS   = 0x7,
-	MLX5_ADS_STAT_RATE_10GBPS    = 0x8,
-	MLX5_ADS_STAT_RATE_30GBPS    = 0x9,
-	MLX5_ADS_STAT_RATE_5GBPS     = 0xa,
-	MLX5_ADS_STAT_RATE_20GBPS    = 0xb,
-	MLX5_ADS_STAT_RATE_40GBPS    = 0xc,
-	MLX5_ADS_STAT_RATE_60GBPS    = 0xd,
-	MLX5_ADS_STAT_RATE_80GBPS    = 0xe,
-	MLX5_ADS_STAT_RATE_120GBPS   = 0xf,
-};
-
-struct mlx5_ifc_ads_bits {
-	u8         fl[0x1];
-	u8         free_ar[0x1];
-	u8         reserved_at_2[0xe];
-	u8         pkey_index[0x10];
-
-	u8         reserved_at_20[0x8];
-	u8         grh[0x1];
-	u8         mlid[0x7];
-	u8         rlid[0x10];
-
-	u8         ack_timeout[0x5];
-	u8         reserved_at_45[0x3];
-	u8         src_addr_index[0x8];
-	u8         reserved_at_50[0x4];
-	u8         stat_rate[0x4];
-	u8         hop_limit[0x8];
-
-	u8         reserved_at_60[0x4];
-	u8         tclass[0x8];
-	u8         flow_label[0x14];
-
-	u8         rgid_rip[16][0x8];
-
-	u8         reserved_at_100[0x4];
-	u8         f_dscp[0x1];
-	u8         f_ecn[0x1];
-	u8         reserved_at_106[0x1];
-	u8         f_eth_prio[0x1];
-	u8         ecn[0x2];
-	u8         dscp[0x6];
-	u8         udp_sport[0x10];
-
-	u8         dei_cfi[0x1];
-	u8         eth_prio[0x3];
-	u8         sl[0x4];
-	u8         port[0x8];
-	u8         rmac_47_32[0x10];
-
-	u8         rmac_31_0[0x20];
-};
-
-struct mlx5_ifc_flow_table_nic_cap_bits {
-	u8         nic_rx_multi_path_tirs[0x1];
-	u8         nic_rx_multi_path_tirs_fts[0x1];
-	u8         allow_sniffer_and_nic_rx_shared_tir[0x1];
-	u8         reserved_at_3[0x1fd];
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
-
-	u8         reserved_at_400[0x200];
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
-
-	u8         reserved_at_a00[0x200];
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
-
-	u8         reserved_at_e00[0x7200];
-};
-
-struct mlx5_ifc_flow_table_eswitch_cap_bits {
-	u8     reserved_at_0[0x200];
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_ingress;
-
-	struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
-
-	u8      reserved_at_800[0x7800];
-};
-
-struct mlx5_ifc_e_switch_cap_bits {
-	u8         vport_svlan_strip[0x1];
-	u8         vport_cvlan_strip[0x1];
-	u8         vport_svlan_insert[0x1];
-	u8         vport_cvlan_insert_if_not_exist[0x1];
-	u8         vport_cvlan_insert_overwrite[0x1];
-	u8         reserved_at_5[0x19];
-	u8         nic_vport_node_guid_modify[0x1];
-	u8         nic_vport_port_guid_modify[0x1];
-
-	u8         vxlan_encap_decap[0x1];
-	u8         nvgre_encap_decap[0x1];
-	u8         reserved_at_22[0x9];
-	u8         log_max_encap_headers[0x5];
-	u8         reserved_2b[0x6];
-	u8         max_encap_header_size[0xa];
-
-	u8         reserved_40[0x7c0];
-
-};
-
-struct mlx5_ifc_qos_cap_bits {
-	u8         packet_pacing[0x1];
-	u8         esw_scheduling[0x1];
-	u8         reserved_at_2[0x1e];
-
-	u8         reserved_at_20[0x20];
-
-	u8         packet_pacing_max_rate[0x20];
-
-	u8         packet_pacing_min_rate[0x20];
-
-	u8         reserved_at_80[0x10];
-	u8         packet_pacing_rate_table_size[0x10];
-
-	u8         esw_element_type[0x10];
-	u8         esw_tsar_type[0x10];
-
-	u8         reserved_at_c0[0x10];
-	u8         max_qos_para_vport[0x10];
-
-	u8         max_tsar_bw_share[0x20];
-
-	u8         reserved_at_100[0x700];
-};
-
-struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
-	u8         csum_cap[0x1];
-	u8         vlan_cap[0x1];
-	u8         lro_cap[0x1];
-	u8         lro_psh_flag[0x1];
-	u8         lro_time_stamp[0x1];
-	u8         reserved_at_5[0x3];
-	u8         self_lb_en_modifiable[0x1];
-	u8         reserved_at_9[0x2];
-	u8         max_lso_cap[0x5];
-	u8         multi_pkt_send_wqe[0x2];
-	u8	   wqe_inline_mode[0x2];
-	u8         rss_ind_tbl_cap[0x4];
-	u8         reg_umr_sq[0x1];
-	u8         scatter_fcs[0x1];
-	u8         reserved_at_1a[0x1];
-	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_at_1c[0x2];
-	u8         tunnel_statless_gre[0x1];
-	u8         tunnel_stateless_vxlan[0x1];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         lro_min_mss_size[0x10];
-
-	u8         reserved_at_60[0x120];
-
-	u8         lro_timer_supported_periods[4][0x20];
-
-	u8         reserved_at_200[0x600];
-};
-
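These capability layouts are never dereferenced directly; the driver reads
them through the MLX5_CAP_* wrappers (include/linux/mlx5/device.h), which
expand to MLX5_GET() on the cached per-device capability pages. A minimal,
hypothetical sketch (MLX5_CAP_ESW() works the same way for e_switch_cap):

	static bool example_eth_lro_supported(struct mlx5_core_dev *mdev)
	{
		/* Hypothetical helper: MLX5_CAP_ETH() is MLX5_GET() applied
		 * to the cached per_protocol_networking_offload_caps page
		 * defined above. */
		return MLX5_CAP_ETH(mdev, lro_cap);
	}
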
-struct mlx5_ifc_roce_cap_bits {
-	u8         roce_apm[0x1];
-	u8         reserved_at_1[0x1f];
-
-	u8         reserved_at_20[0x60];
-
-	u8         reserved_at_80[0xc];
-	u8         l3_type[0x4];
-	u8         reserved_at_90[0x8];
-	u8         roce_version[0x8];
-
-	u8         reserved_at_a0[0x10];
-	u8         r_roce_dest_udp_port[0x10];
-
-	u8         r_roce_max_src_udp_port[0x10];
-	u8         r_roce_min_src_udp_port[0x10];
-
-	u8         reserved_at_e0[0x10];
-	u8         roce_address_table_size[0x10];
-
-	u8         reserved_at_100[0x700];
-};
-
-enum {
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE     = 0x0,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES    = 0x2,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_4_BYTES    = 0x4,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_8_BYTES    = 0x8,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_16_BYTES   = 0x10,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_32_BYTES   = 0x20,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_64_BYTES   = 0x40,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_128_BYTES  = 0x80,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_256_BYTES  = 0x100,
-};
-
-enum {
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_1_BYTE     = 0x1,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_2_BYTES    = 0x2,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_4_BYTES    = 0x4,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_8_BYTES    = 0x8,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_16_BYTES   = 0x10,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_32_BYTES   = 0x20,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_64_BYTES   = 0x40,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_128_BYTES  = 0x80,
-	MLX5_ATOMIC_CAPS_ATOMIC_SIZE_DC_256_BYTES  = 0x100,
-};
-
-struct mlx5_ifc_atomic_caps_bits {
-	u8         reserved_at_0[0x40];
-
-	u8         atomic_req_8B_endianess_mode[0x2];
-	u8         reserved_at_42[0x4];
-	u8         supported_atomic_req_8B_endianess_mode_1[0x1];
-
-	u8         reserved_at_47[0x19];
-
-	u8         reserved_at_60[0x20];
-
-	u8         reserved_at_80[0x10];
-	u8         atomic_operations[0x10];
-
-	u8         reserved_at_a0[0x10];
-	u8         atomic_size_qp[0x10];
-
-	u8         reserved_at_c0[0x10];
-	u8         atomic_size_dc[0x10];
-
-	u8         reserved_at_e0[0x720];
-};
-
-struct mlx5_ifc_odp_cap_bits {
-	u8         reserved_at_0[0x40];
-
-	u8         sig[0x1];
-	u8         reserved_at_41[0x1f];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
-
-	struct mlx5_ifc_odp_per_transport_service_cap_bits uc_odp_caps;
-
-	struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
-
-	u8         reserved_at_e0[0x720];
-};
-
-struct mlx5_ifc_calc_op {
-	u8        reserved_at_0[0x10];
-	u8        reserved_at_10[0x9];
-	u8        op_swap_endianness[0x1];
-	u8        op_min[0x1];
-	u8        op_xor[0x1];
-	u8        op_or[0x1];
-	u8        op_and[0x1];
-	u8        op_max[0x1];
-	u8        op_add[0x1];
-};
-
-struct mlx5_ifc_vector_calc_cap_bits {
-	u8         calc_matrix[0x1];
-	u8         reserved_at_1[0x1f];
-	u8         reserved_at_20[0x8];
-	u8         max_vec_count[0x8];
-	u8         reserved_at_30[0xd];
-	u8         max_chunk_size[0x3];
-	struct mlx5_ifc_calc_op calc0;
-	struct mlx5_ifc_calc_op calc1;
-	struct mlx5_ifc_calc_op calc2;
-	struct mlx5_ifc_calc_op calc3;
-
-	u8         reserved_at_e0[0x720];
-};
-
-enum {
-	MLX5_WQ_TYPE_LINKED_LIST  = 0x0,
-	MLX5_WQ_TYPE_CYCLIC       = 0x1,
-	MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ = 0x2,
-};
-
-enum {
-	MLX5_WQ_END_PAD_MODE_NONE   = 0x0,
-	MLX5_WQ_END_PAD_MODE_ALIGN  = 0x1,
-};
-
-enum {
-	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_8_GID_ENTRIES    = 0x0,
-	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_16_GID_ENTRIES   = 0x1,
-	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_32_GID_ENTRIES   = 0x2,
-	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_64_GID_ENTRIES   = 0x3,
-	MLX5_CMD_HCA_CAP_GID_TABLE_SIZE_128_GID_ENTRIES  = 0x4,
-};
-
-enum {
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_128_ENTRIES  = 0x0,
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_256_ENTRIES  = 0x1,
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_512_ENTRIES  = 0x2,
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_1K_ENTRIES   = 0x3,
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_2K_ENTRIES   = 0x4,
-	MLX5_CMD_HCA_CAP_PKEY_TABLE_SIZE_4K_ENTRIES   = 0x5,
-};
-
-enum {
-	MLX5_CMD_HCA_CAP_PORT_TYPE_IB        = 0x0,
-	MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET  = 0x1,
-};
-
-enum {
-	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_DISABLED       = 0x0,
-	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_INITIAL_STATE  = 0x1,
-	MLX5_CMD_HCA_CAP_CMDIF_CHECKSUM_ENABLED        = 0x3,
-};
-
-enum {
-	MLX5_CAP_PORT_TYPE_IB  = 0x0,
-	MLX5_CAP_PORT_TYPE_ETH = 0x1,
-};
-
-struct mlx5_ifc_cmd_hca_cap_bits {
-	u8         reserved_at_0[0x80];
-
-	u8         log_max_srq_sz[0x8];
-	u8         log_max_qp_sz[0x8];
-	u8         reserved_at_90[0xb];
-	u8         log_max_qp[0x5];
-
-	u8         reserved_at_a0[0xb];
-	u8         log_max_srq[0x5];
-	u8         reserved_at_b0[0x10];
-
-	u8         reserved_at_c0[0x8];
-	u8         log_max_cq_sz[0x8];
-	u8         reserved_at_d0[0xb];
-	u8         log_max_cq[0x5];
-
-	u8         log_max_eq_sz[0x8];
-	u8         reserved_at_e8[0x2];
-	u8         log_max_mkey[0x6];
-	u8         reserved_at_f0[0xc];
-	u8         log_max_eq[0x4];
-
-	u8         max_indirection[0x8];
-	u8         fixed_buffer_size[0x1];
-	u8         log_max_mrw_sz[0x7];
-	u8         reserved_at_110[0x2];
-	u8         log_max_bsf_list_size[0x6];
-	u8         umr_extended_translation_offset[0x1];
-	u8         null_mkey[0x1];
-	u8         log_max_klm_list_size[0x6];
-
-	u8         reserved_at_120[0xa];
-	u8         log_max_ra_req_dc[0x6];
-	u8         reserved_at_130[0xa];
-	u8         log_max_ra_res_dc[0x6];
-
-	u8         reserved_at_140[0xa];
-	u8         log_max_ra_req_qp[0x6];
-	u8         reserved_at_150[0xa];
-	u8         log_max_ra_res_qp[0x6];
-
-	u8         pad_cap[0x1];
-	u8         cc_query_allowed[0x1];
-	u8         cc_modify_allowed[0x1];
-	u8         reserved_at_163[0xd];
-	u8         gid_table_size[0x10];
-
-	u8         out_of_seq_cnt[0x1];
-	u8         vport_counters[0x1];
-	u8         retransmission_q_counters[0x1];
-	u8         reserved_at_183[0x1];
-	u8         modify_rq_counter_set_id[0x1];
-	u8         reserved_at_185[0x1];
-	u8         max_qp_cnt[0xa];
-	u8         pkey_table_size[0x10];
-
-	u8         vport_group_manager[0x1];
-	u8         vhca_group_manager[0x1];
-	u8         ib_virt[0x1];
-	u8         eth_virt[0x1];
-	u8         reserved_at_1a4[0x1];
-	u8         ets[0x1];
-	u8         nic_flow_table[0x1];
-	u8         eswitch_flow_table[0x1];
-	u8	   early_vf_enable[0x1];
-	u8         reserved_at_1a9[0x2];
-	u8         local_ca_ack_delay[0x5];
-	u8         port_module_event[0x1];
-	u8         reserved_at_1b1[0x1];
-	u8         ports_check[0x1];
-	u8         reserved_at_1b3[0x1];
-	u8         disable_link_up[0x1];
-	u8         beacon_led[0x1];
-	u8         port_type[0x2];
-	u8         num_ports[0x8];
-
-	u8         reserved_at_1c0[0x3];
-	u8         log_max_msg[0x5];
-	u8         reserved_at_1c8[0x4];
-	u8         max_tc[0x4];
-	u8         reserved_at_1d0[0x1];
-	u8         dcbx[0x1];
-	u8         reserved_at_1d2[0x4];
-	u8         rol_s[0x1];
-	u8         rol_g[0x1];
-	u8         reserved_at_1d8[0x1];
-	u8         wol_s[0x1];
-	u8         wol_g[0x1];
-	u8         wol_a[0x1];
-	u8         wol_b[0x1];
-	u8         wol_m[0x1];
-	u8         wol_u[0x1];
-	u8         wol_p[0x1];
-
-	u8         stat_rate_support[0x10];
-	u8         reserved_at_1f0[0xc];
-	u8         cqe_version[0x4];
-
-	u8         compact_address_vector[0x1];
-	u8         striding_rq[0x1];
-	u8         reserved_at_202[0x2];
-	u8         ipoib_basic_offloads[0x1];
-	u8         reserved_at_205[0xa];
-	u8         drain_sigerr[0x1];
-	u8         cmdif_checksum[0x2];
-	u8         sigerr_cqe[0x1];
-	u8         reserved_at_213[0x1];
-	u8         wq_signature[0x1];
-	u8         sctr_data_cqe[0x1];
-	u8         reserved_at_216[0x1];
-	u8         sho[0x1];
-	u8         tph[0x1];
-	u8         rf[0x1];
-	u8         dct[0x1];
-	u8         qos[0x1];
-	u8         eth_net_offloads[0x1];
-	u8         roce[0x1];
-	u8         atomic[0x1];
-	u8         reserved_at_21f[0x1];
-
-	u8         cq_oi[0x1];
-	u8         cq_resize[0x1];
-	u8         cq_moderation[0x1];
-	u8         reserved_at_223[0x3];
-	u8         cq_eq_remap[0x1];
-	u8         pg[0x1];
-	u8         block_lb_mc[0x1];
-	u8         reserved_at_229[0x1];
-	u8         scqe_break_moderation[0x1];
-	u8         cq_period_start_from_cqe[0x1];
-	u8         cd[0x1];
-	u8         reserved_at_22d[0x1];
-	u8         apm[0x1];
-	u8         vector_calc[0x1];
-	u8         umr_ptr_rlky[0x1];
-	u8	   imaicl[0x1];
-	u8         reserved_at_232[0x4];
-	u8         qkv[0x1];
-	u8         pkv[0x1];
-	u8         set_deth_sqpn[0x1];
-	u8         reserved_at_239[0x3];
-	u8         xrc[0x1];
-	u8         ud[0x1];
-	u8         uc[0x1];
-	u8         rc[0x1];
-
-	u8         uar_4k[0x1];
-	u8         reserved_at_241[0x9];
-	u8         uar_sz[0x6];
-	u8         reserved_at_250[0x8];
-	u8         log_pg_sz[0x8];
-
-	u8         bf[0x1];
-	u8         driver_version[0x1];
-	u8         pad_tx_eth_packet[0x1];
-	u8         reserved_at_263[0x8];
-	u8         log_bf_reg_size[0x5];
-
-	u8         reserved_at_270[0xb];
-	u8         lag_master[0x1];
-	u8         num_lag_ports[0x4];
-
-	u8         reserved_at_280[0x10];
-	u8         max_wqe_sz_sq[0x10];
-
-	u8         reserved_at_2a0[0x10];
-	u8         max_wqe_sz_rq[0x10];
-
-	u8         reserved_at_2c0[0x10];
-	u8         max_wqe_sz_sq_dc[0x10];
-
-	u8         reserved_at_2e0[0x7];
-	u8         max_qp_mcg[0x19];
-
-	u8         reserved_at_300[0x18];
-	u8         log_max_mcg[0x8];
-
-	u8         reserved_at_320[0x3];
-	u8         log_max_transport_domain[0x5];
-	u8         reserved_at_328[0x3];
-	u8         log_max_pd[0x5];
-	u8         reserved_at_330[0xb];
-	u8         log_max_xrcd[0x5];
-
-	u8         reserved_at_340[0x8];
-	u8         log_max_flow_counter_bulk[0x8];
-	u8         max_flow_counter[0x10];
-

-	u8         reserved_at_360[0x3];
-	u8         log_max_rq[0x5];
-	u8         reserved_at_368[0x3];
-	u8         log_max_sq[0x5];
-	u8         reserved_at_370[0x3];
-	u8         log_max_tir[0x5];
-	u8         reserved_at_378[0x3];
-	u8         log_max_tis[0x5];
-
-	u8         basic_cyclic_rcv_wqe[0x1];
-	u8         reserved_at_381[0x2];
-	u8         log_max_rmp[0x5];
-	u8         reserved_at_388[0x3];
-	u8         log_max_rqt[0x5];
-	u8         reserved_at_390[0x3];
-	u8         log_max_rqt_size[0x5];
-	u8         reserved_at_398[0x3];
-	u8         log_max_tis_per_sq[0x5];
-
-	u8         reserved_at_3a0[0x3];
-	u8         log_max_stride_sz_rq[0x5];
-	u8         reserved_at_3a8[0x3];
-	u8         log_min_stride_sz_rq[0x5];
-	u8         reserved_at_3b0[0x3];
-	u8         log_max_stride_sz_sq[0x5];
-	u8         reserved_at_3b8[0x3];
-	u8         log_min_stride_sz_sq[0x5];
-
-	u8         reserved_at_3c0[0x1b];
-	u8         log_max_wq_sz[0x5];
-
-	u8         nic_vport_change_event[0x1];
-	u8         reserved_at_3e1[0xa];
-	u8         log_max_vlan_list[0x5];
-	u8         reserved_at_3f0[0x3];
-	u8         log_max_current_mc_list[0x5];
-	u8         reserved_at_3f8[0x3];
-	u8         log_max_current_uc_list[0x5];
-
-	u8         reserved_at_400[0x80];
-
-	u8         reserved_at_480[0x3];
-	u8         log_max_l2_table[0x5];
-	u8         reserved_at_488[0x8];
-	u8         log_uar_page_sz[0x10];
-
-	u8         reserved_at_4a0[0x20];
-	u8         device_frequency_mhz[0x20];
-	u8         device_frequency_khz[0x20];
-
-	u8         reserved_at_500[0x20];
-	u8	   num_of_uars_per_page[0x20];
-	u8         reserved_at_540[0x40];
-
-	u8         reserved_at_580[0x3f];
-	u8         cqe_compression[0x1];
-
-	u8         cqe_compression_timeout[0x10];
-	u8         cqe_compression_max_num[0x10];
-
-	u8         reserved_at_5e0[0x10];
-	u8         tag_matching[0x1];
-	u8         rndv_offload_rc[0x1];
-	u8         rndv_offload_dc[0x1];
-	u8         log_tag_matching_list_sz[0x5];
-	u8         reserved_at_5f8[0x3];
-	u8         log_max_xrq[0x5];
-
-	u8         reserved_at_600[0x200];
-};
-
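cmd_hca_cap is parsed the same way from a QUERY_HCA_CAP response: every
[width] annotation above becomes a field reference for MLX5_GET(). A hedged
sketch, where 'caps' is assumed to point at the capability union of a
successful response:

	static void example_read_general_caps(void *caps)
	{
		/* 'caps' assumed to come from
		 * MLX5_ADDR_OF(query_hca_cap_out, out, capability) */
		u8 log_max_qp = MLX5_GET(cmd_hca_cap, caps, log_max_qp);
		u8 port_type  = MLX5_GET(cmd_hca_cap, caps, port_type);

		pr_debug("max QPs %u, port type %u\n",
			 1 << log_max_qp, port_type);
	}
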
-enum mlx5_flow_destination_type {
-	MLX5_FLOW_DESTINATION_TYPE_VPORT        = 0x0,
-	MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE   = 0x1,
-	MLX5_FLOW_DESTINATION_TYPE_TIR          = 0x2,
-
-	MLX5_FLOW_DESTINATION_TYPE_COUNTER      = 0x100,
-};
-
-struct mlx5_ifc_dest_format_struct_bits {
-	u8         destination_type[0x8];
-	u8         destination_id[0x18];
-
-	u8         reserved_at_20[0x20];
-};
-
-struct mlx5_ifc_flow_counter_list_bits {
-	u8         clear[0x1];
-	u8         num_of_counters[0xf];
-	u8         flow_counter_id[0x10];
-
-	u8         reserved_at_20[0x20];
-};
-
-union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
-	struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
-	struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
-	u8         reserved_at_0[0x40];
-};
-
-struct mlx5_ifc_fte_match_param_bits {
-	struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
-
-	struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
-
-	struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
-
-	u8         reserved_at_600[0xa00];
-};
-
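fte_match_param is the match half of flow steering: a criteria buffer marks
which bits are compared (set to ones) and a value buffer holds what they must
equal. A sketch of matching outer IPv4 ethertype; 'match_c'/'match_v' are
assumed to be zeroed buffers of MLX5_ST_SZ_BYTES(fte_match_param):

	static void example_match_ipv4(u32 *match_c, u32 *match_v)
	{
		void *hdrs_c = MLX5_ADDR_OF(fte_match_param, match_c,
					    outer_headers);
		void *hdrs_v = MLX5_ADDR_OF(fte_match_param, match_v,
					    outer_headers);

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, hdrs_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype, ETH_P_IP);
	}
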
-enum {
-	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
-	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
-	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
-	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
-	MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
-};
-
-struct mlx5_ifc_rx_hash_field_select_bits {
-	u8         l3_prot_type[0x1];
-	u8         l4_prot_type[0x1];
-	u8         selected_fields[0x1e];
-};
-
-enum {
-	MLX5_WQ_WQ_TYPE_WQ_LINKED_LIST  = 0x0,
-	MLX5_WQ_WQ_TYPE_WQ_CYCLIC       = 0x1,
-};
-
-enum {
-	MLX5_WQ_END_PADDING_MODE_END_PAD_NONE   = 0x0,
-	MLX5_WQ_END_PADDING_MODE_END_PAD_ALIGN  = 0x1,
-};
-
-struct mlx5_ifc_wq_bits {
-	u8         wq_type[0x4];
-	u8         wq_signature[0x1];
-	u8         end_padding_mode[0x2];
-	u8         cd_slave[0x1];
-	u8         reserved_at_8[0x18];
-
-	u8         hds_skip_first_sge[0x1];
-	u8         log2_hds_buf_size[0x3];
-	u8         reserved_at_24[0x7];
-	u8         page_offset[0x5];
-	u8         lwm[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         pd[0x18];
-
-	u8         reserved_at_60[0x8];
-	u8         uar_page[0x18];
-
-	u8         dbr_addr[0x40];
-
-	u8         hw_counter[0x20];
-
-	u8         sw_counter[0x20];
-
-	u8         reserved_at_100[0xc];
-	u8         log_wq_stride[0x4];
-	u8         reserved_at_110[0x3];
-	u8         log_wq_pg_sz[0x5];
-	u8         reserved_at_118[0x3];
-	u8         log_wq_sz[0x5];
-
-	u8         reserved_at_120[0x15];
-	u8         log_wqe_num_of_strides[0x3];
-	u8         two_byte_shift_en[0x1];
-	u8         reserved_at_139[0x4];
-	u8         log_wqe_stride_size[0x3];
-
-	u8         reserved_at_140[0x4c0];
-
-	struct mlx5_ifc_cmd_pas_bits pas[0];
-};
-
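This wq context is embedded at the tail of the RQ/SQ/RMP contexts further
down and is filled with the same accessors. A sketch of the fields a plain
linked-list receive queue would set; the pointer and values are assumed:

	static void example_fill_rq_wq(void *wq, u32 pdn, u8 log_size,
				       u8 log_stride)
	{
		/* 'wq' assumed: MLX5_ADDR_OF(rqc, rqc, wq) in a CREATE_RQ
		 * mailbox */
		MLX5_SET(wq, wq, wq_type,          MLX5_WQ_TYPE_LINKED_LIST);
		MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
		MLX5_SET(wq, wq, log_wq_stride,    log_stride);
		MLX5_SET(wq, wq, log_wq_sz,        log_size);
		MLX5_SET(wq, wq, pd,               pdn);
	}
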
-struct mlx5_ifc_rq_num_bits {
-	u8         reserved_at_0[0x8];
-	u8         rq_num[0x18];
-};
-
-struct mlx5_ifc_mac_address_layout_bits {
-	u8         reserved_at_0[0x10];
-	u8         mac_addr_47_32[0x10];
-
-	u8         mac_addr_31_0[0x20];
-};
-
-struct mlx5_ifc_vlan_layout_bits {
-	u8         reserved_at_0[0x14];
-	u8         vlan[0x0c];
-
-	u8         reserved_at_20[0x20];
-};
-
-struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
-	u8         reserved_at_0[0xa0];
-
-	u8         min_time_between_cnps[0x20];
-
-	u8         reserved_at_c0[0x12];
-	u8         cnp_dscp[0x6];
-	u8         reserved_at_d8[0x5];
-	u8         cnp_802p_prio[0x3];
-
-	u8         reserved_at_e0[0x720];
-};
-
-struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
-	u8         reserved_at_0[0x60];
-
-	u8         reserved_at_60[0x4];
-	u8         clamp_tgt_rate[0x1];
-	u8         reserved_at_65[0x3];
-	u8         clamp_tgt_rate_after_time_inc[0x1];
-	u8         reserved_at_69[0x17];
-
-	u8         reserved_at_80[0x20];
-
-	u8         rpg_time_reset[0x20];
-
-	u8         rpg_byte_reset[0x20];
-
-	u8         rpg_threshold[0x20];
-
-	u8         rpg_max_rate[0x20];
-
-	u8         rpg_ai_rate[0x20];
-
-	u8         rpg_hai_rate[0x20];
-
-	u8         rpg_gd[0x20];
-
-	u8         rpg_min_dec_fac[0x20];
-
-	u8         rpg_min_rate[0x20];
-
-	u8         reserved_at_1c0[0xe0];
-
-	u8         rate_to_set_on_first_cnp[0x20];
-
-	u8         dce_tcp_g[0x20];
-
-	u8         dce_tcp_rtt[0x20];
-
-	u8         rate_reduce_monitor_period[0x20];
-
-	u8         reserved_at_320[0x20];
-
-	u8         initial_alpha_value[0x20];
-
-	u8         reserved_at_360[0x4a0];
-};
-
-struct mlx5_ifc_cong_control_802_1qau_rp_bits {
-	u8         reserved_at_0[0x80];
-
-	u8         rppp_max_rps[0x20];
-
-	u8         rpg_time_reset[0x20];
-
-	u8         rpg_byte_reset[0x20];
-
-	u8         rpg_threshold[0x20];
-
-	u8         rpg_max_rate[0x20];
-
-	u8         rpg_ai_rate[0x20];
-
-	u8         rpg_hai_rate[0x20];
-
-	u8         rpg_gd[0x20];
-
-	u8         rpg_min_dec_fac[0x20];
-
-	u8         rpg_min_rate[0x20];
-
-	u8         reserved_at_1c0[0x640];
-};
-
-enum {
-	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_CQ_SIZE    = 0x1,
-	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_PAGE_OFFSET    = 0x2,
-	MLX5_RESIZE_FIELD_SELECT_RESIZE_FIELD_SELECT_LOG_PAGE_SIZE  = 0x4,
-};
-
-struct mlx5_ifc_resize_field_select_bits {
-	u8         resize_field_select[0x20];
-};
-
-enum {
-	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD     = 0x1,
-	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT  = 0x2,
-	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_OI            = 0x4,
-	MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_C_EQN         = 0x8,
-};
-
-struct mlx5_ifc_modify_field_select_bits {
-	u8         modify_field_select[0x20];
-};
-
-struct mlx5_ifc_field_select_r_roce_np_bits {
-	u8         field_select_r_roce_np[0x20];
-};
-
-struct mlx5_ifc_field_select_r_roce_rp_bits {
-	u8         field_select_r_roce_rp[0x20];
-};
-
-enum {
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPPP_MAX_RPS     = 0x4,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_TIME_RESET   = 0x8,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_BYTE_RESET   = 0x10,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_THRESHOLD    = 0x20,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MAX_RATE     = 0x40,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_AI_RATE      = 0x80,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_HAI_RATE     = 0x100,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_GD           = 0x200,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_DEC_FAC  = 0x400,
-	MLX5_FIELD_SELECT_802_1QAU_RP_FIELD_SELECT_8021QAURP_RPG_MIN_RATE     = 0x800,
-};
-
-struct mlx5_ifc_field_select_802_1qau_rp_bits {
-	u8         field_select_8021qaurp[0x20];
-};
-
-struct mlx5_ifc_phys_layer_cntrs_bits {
-	u8         time_since_last_clear_high[0x20];
-
-	u8         time_since_last_clear_low[0x20];
-
-	u8         symbol_errors_high[0x20];
-
-	u8         symbol_errors_low[0x20];
-
-	u8         sync_headers_errors_high[0x20];
-
-	u8         sync_headers_errors_low[0x20];
-
-	u8         edpl_bip_errors_lane0_high[0x20];
-
-	u8         edpl_bip_errors_lane0_low[0x20];
-
-	u8         edpl_bip_errors_lane1_high[0x20];
-
-	u8         edpl_bip_errors_lane1_low[0x20];
-
-	u8         edpl_bip_errors_lane2_high[0x20];
-
-	u8         edpl_bip_errors_lane2_low[0x20];
-
-	u8         edpl_bip_errors_lane3_high[0x20];
-
-	u8         edpl_bip_errors_lane3_low[0x20];
-
-	u8         fc_fec_corrected_blocks_lane0_high[0x20];
-
-	u8         fc_fec_corrected_blocks_lane0_low[0x20];
-
-	u8         fc_fec_corrected_blocks_lane1_high[0x20];
-
-	u8         fc_fec_corrected_blocks_lane1_low[0x20];
-
-	u8         fc_fec_corrected_blocks_lane2_high[0x20];
-
-	u8         fc_fec_corrected_blocks_lane2_low[0x20];
-
-	u8         fc_fec_corrected_blocks_lane3_high[0x20];
-
-	u8         fc_fec_corrected_blocks_lane3_low[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane0_high[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane0_low[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane1_high[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane1_low[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane2_high[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane2_low[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane3_high[0x20];
-
-	u8         fc_fec_uncorrectable_blocks_lane3_low[0x20];
-
-	u8         rs_fec_corrected_blocks_high[0x20];
-
-	u8         rs_fec_corrected_blocks_low[0x20];
-
-	u8         rs_fec_uncorrectable_blocks_high[0x20];
-
-	u8         rs_fec_uncorrectable_blocks_low[0x20];
-
-	u8         rs_fec_no_errors_blocks_high[0x20];
-
-	u8         rs_fec_no_errors_blocks_low[0x20];
-
-	u8         rs_fec_single_error_blocks_high[0x20];
-
-	u8         rs_fec_single_error_blocks_low[0x20];
-
-	u8         rs_fec_corrected_symbols_total_high[0x20];
-
-	u8         rs_fec_corrected_symbols_total_low[0x20];
-
-	u8         rs_fec_corrected_symbols_lane0_high[0x20];
-
-	u8         rs_fec_corrected_symbols_lane0_low[0x20];
-
-	u8         rs_fec_corrected_symbols_lane1_high[0x20];
-
-	u8         rs_fec_corrected_symbols_lane1_low[0x20];
-
-	u8         rs_fec_corrected_symbols_lane2_high[0x20];
-
-	u8         rs_fec_corrected_symbols_lane2_low[0x20];
-
-	u8         rs_fec_corrected_symbols_lane3_high[0x20];
-
-	u8         rs_fec_corrected_symbols_lane3_low[0x20];
-
-	u8         link_down_events[0x20];
-
-	u8         successful_recovery_events[0x20];
-
-	u8         reserved_at_640[0x180];
-};
-
-struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
-	u8	   symbol_error_counter[0x10];
-
-	u8         link_error_recovery_counter[0x8];
-
-	u8         link_downed_counter[0x8];
-
-	u8         port_rcv_errors[0x10];
-
-	u8         port_rcv_remote_physical_errors[0x10];
-
-	u8         port_rcv_switch_relay_errors[0x10];
-
-	u8         port_xmit_discards[0x10];
-
-	u8         port_xmit_constraint_errors[0x8];
-
-	u8         port_rcv_constraint_errors[0x8];
-
-	u8         reserved_at_70[0x8];
-
-	u8         link_overrun_errors[0x8];
-
-	u8	   reserved_at_80[0x10];
-
-	u8         vl_15_dropped[0x10];
-
-	u8	   reserved_at_a0[0xa0];
-};
-
-struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
-	u8         transmit_queue_high[0x20];
-
-	u8         transmit_queue_low[0x20];
-
-	u8         reserved_at_40[0x780];
-};
-
-struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
-	u8         rx_octets_high[0x20];
-
-	u8         rx_octets_low[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	u8         rx_frames_high[0x20];
-
-	u8         rx_frames_low[0x20];
-
-	u8         tx_octets_high[0x20];
-
-	u8         tx_octets_low[0x20];
-
-	u8         reserved_at_180[0xc0];
-
-	u8         tx_frames_high[0x20];
-
-	u8         tx_frames_low[0x20];
-
-	u8         rx_pause_high[0x20];
-
-	u8         rx_pause_low[0x20];
-
-	u8         rx_pause_duration_high[0x20];
-
-	u8         rx_pause_duration_low[0x20];
-
-	u8         tx_pause_high[0x20];
-
-	u8         tx_pause_low[0x20];
-
-	u8         tx_pause_duration_high[0x20];
-
-	u8         tx_pause_duration_low[0x20];
-
-	u8         rx_pause_transition_high[0x20];
-
-	u8         rx_pause_transition_low[0x20];
-
-	u8         reserved_at_3c0[0x400];
-};
-
-struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
-	u8         port_transmit_wait_high[0x20];
-
-	u8         port_transmit_wait_low[0x20];
-
-	u8         reserved_at_40[0x780];
-};
-
-struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
-	u8         dot3stats_alignment_errors_high[0x20];
-
-	u8         dot3stats_alignment_errors_low[0x20];
-
-	u8         dot3stats_fcs_errors_high[0x20];
-
-	u8         dot3stats_fcs_errors_low[0x20];
-
-	u8         dot3stats_single_collision_frames_high[0x20];
-
-	u8         dot3stats_single_collision_frames_low[0x20];
-
-	u8         dot3stats_multiple_collision_frames_high[0x20];
-
-	u8         dot3stats_multiple_collision_frames_low[0x20];
-
-	u8         dot3stats_sqe_test_errors_high[0x20];
-
-	u8         dot3stats_sqe_test_errors_low[0x20];
-
-	u8         dot3stats_deferred_transmissions_high[0x20];
-
-	u8         dot3stats_deferred_transmissions_low[0x20];
-
-	u8         dot3stats_late_collisions_high[0x20];
-
-	u8         dot3stats_late_collisions_low[0x20];
-
-	u8         dot3stats_excessive_collisions_high[0x20];
-
-	u8         dot3stats_excessive_collisions_low[0x20];
-
-	u8         dot3stats_internal_mac_transmit_errors_high[0x20];
-
-	u8         dot3stats_internal_mac_transmit_errors_low[0x20];
-
-	u8         dot3stats_carrier_sense_errors_high[0x20];
-
-	u8         dot3stats_carrier_sense_errors_low[0x20];
-
-	u8         dot3stats_frame_too_longs_high[0x20];
-
-	u8         dot3stats_frame_too_longs_low[0x20];
-
-	u8         dot3stats_internal_mac_receive_errors_high[0x20];
-
-	u8         dot3stats_internal_mac_receive_errors_low[0x20];
-
-	u8         dot3stats_symbol_errors_high[0x20];
-
-	u8         dot3stats_symbol_errors_low[0x20];
-
-	u8         dot3control_in_unknown_opcodes_high[0x20];
-
-	u8         dot3control_in_unknown_opcodes_low[0x20];
-
-	u8         dot3in_pause_frames_high[0x20];
-
-	u8         dot3in_pause_frames_low[0x20];
-
-	u8         dot3out_pause_frames_high[0x20];
-
-	u8         dot3out_pause_frames_low[0x20];
-
-	u8         reserved_at_400[0x3c0];
-};
-
-struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
-	u8         ether_stats_drop_events_high[0x20];
-
-	u8         ether_stats_drop_events_low[0x20];
-
-	u8         ether_stats_octets_high[0x20];
-
-	u8         ether_stats_octets_low[0x20];
-
-	u8         ether_stats_pkts_high[0x20];
-
-	u8         ether_stats_pkts_low[0x20];
-
-	u8         ether_stats_broadcast_pkts_high[0x20];
-
-	u8         ether_stats_broadcast_pkts_low[0x20];
-
-	u8         ether_stats_multicast_pkts_high[0x20];
-
-	u8         ether_stats_multicast_pkts_low[0x20];
-
-	u8         ether_stats_crc_align_errors_high[0x20];
-
-	u8         ether_stats_crc_align_errors_low[0x20];
-
-	u8         ether_stats_undersize_pkts_high[0x20];
-
-	u8         ether_stats_undersize_pkts_low[0x20];
-
-	u8         ether_stats_oversize_pkts_high[0x20];
-
-	u8         ether_stats_oversize_pkts_low[0x20];
-
-	u8         ether_stats_fragments_high[0x20];
-
-	u8         ether_stats_fragments_low[0x20];
-
-	u8         ether_stats_jabbers_high[0x20];
-
-	u8         ether_stats_jabbers_low[0x20];
-
-	u8         ether_stats_collisions_high[0x20];
-
-	u8         ether_stats_collisions_low[0x20];
-
-	u8         ether_stats_pkts64octets_high[0x20];
-
-	u8         ether_stats_pkts64octets_low[0x20];
-
-	u8         ether_stats_pkts65to127octets_high[0x20];
-
-	u8         ether_stats_pkts65to127octets_low[0x20];
-
-	u8         ether_stats_pkts128to255octets_high[0x20];
-
-	u8         ether_stats_pkts128to255octets_low[0x20];
-
-	u8         ether_stats_pkts256to511octets_high[0x20];
-
-	u8         ether_stats_pkts256to511octets_low[0x20];
-
-	u8         ether_stats_pkts512to1023octets_high[0x20];
-
-	u8         ether_stats_pkts512to1023octets_low[0x20];
-
-	u8         ether_stats_pkts1024to1518octets_high[0x20];
-
-	u8         ether_stats_pkts1024to1518octets_low[0x20];
-
-	u8         ether_stats_pkts1519to2047octets_high[0x20];
-
-	u8         ether_stats_pkts1519to2047octets_low[0x20];
-
-	u8         ether_stats_pkts2048to4095octets_high[0x20];
-
-	u8         ether_stats_pkts2048to4095octets_low[0x20];
-
-	u8         ether_stats_pkts4096to8191octets_high[0x20];
-
-	u8         ether_stats_pkts4096to8191octets_low[0x20];
-
-	u8         ether_stats_pkts8192to10239octets_high[0x20];
-
-	u8         ether_stats_pkts8192to10239octets_low[0x20];
-
-	u8         reserved_at_540[0x280];
-};
-
-struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
-	u8         if_in_octets_high[0x20];
-
-	u8         if_in_octets_low[0x20];
-
-	u8         if_in_ucast_pkts_high[0x20];
-
-	u8         if_in_ucast_pkts_low[0x20];
-
-	u8         if_in_discards_high[0x20];
-
-	u8         if_in_discards_low[0x20];
-
-	u8         if_in_errors_high[0x20];
-
-	u8         if_in_errors_low[0x20];
-
-	u8         if_in_unknown_protos_high[0x20];
-
-	u8         if_in_unknown_protos_low[0x20];
-
-	u8         if_out_octets_high[0x20];
-
-	u8         if_out_octets_low[0x20];
-
-	u8         if_out_ucast_pkts_high[0x20];
-
-	u8         if_out_ucast_pkts_low[0x20];
-
-	u8         if_out_discards_high[0x20];
-
-	u8         if_out_discards_low[0x20];
-
-	u8         if_out_errors_high[0x20];
-
-	u8         if_out_errors_low[0x20];
-
-	u8         if_in_multicast_pkts_high[0x20];
-
-	u8         if_in_multicast_pkts_low[0x20];
-
-	u8         if_in_broadcast_pkts_high[0x20];
-
-	u8         if_in_broadcast_pkts_low[0x20];
-
-	u8         if_out_multicast_pkts_high[0x20];
-
-	u8         if_out_multicast_pkts_low[0x20];
-
-	u8         if_out_broadcast_pkts_high[0x20];
-
-	u8         if_out_broadcast_pkts_low[0x20];
-
-	u8         reserved_at_340[0x480];
-};
-
-struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
-	u8         a_frames_transmitted_ok_high[0x20];
-
-	u8         a_frames_transmitted_ok_low[0x20];
-
-	u8         a_frames_received_ok_high[0x20];
-
-	u8         a_frames_received_ok_low[0x20];
-
-	u8         a_frame_check_sequence_errors_high[0x20];
-
-	u8         a_frame_check_sequence_errors_low[0x20];
-
-	u8         a_alignment_errors_high[0x20];
-
-	u8         a_alignment_errors_low[0x20];
-
-	u8         a_octets_transmitted_ok_high[0x20];
-
-	u8         a_octets_transmitted_ok_low[0x20];
-
-	u8         a_octets_received_ok_high[0x20];
-
-	u8         a_octets_received_ok_low[0x20];
-
-	u8         a_multicast_frames_xmitted_ok_high[0x20];
-
-	u8         a_multicast_frames_xmitted_ok_low[0x20];
-
-	u8         a_broadcast_frames_xmitted_ok_high[0x20];
-
-	u8         a_broadcast_frames_xmitted_ok_low[0x20];
-
-	u8         a_multicast_frames_received_ok_high[0x20];
-
-	u8         a_multicast_frames_received_ok_low[0x20];
-
-	u8         a_broadcast_frames_received_ok_high[0x20];
-
-	u8         a_broadcast_frames_received_ok_low[0x20];
-
-	u8         a_in_range_length_errors_high[0x20];
-
-	u8         a_in_range_length_errors_low[0x20];
-
-	u8         a_out_of_range_length_field_high[0x20];
-
-	u8         a_out_of_range_length_field_low[0x20];
-
-	u8         a_frame_too_long_errors_high[0x20];
-
-	u8         a_frame_too_long_errors_low[0x20];
-
-	u8         a_symbol_error_during_carrier_high[0x20];
-
-	u8         a_symbol_error_during_carrier_low[0x20];
-
-	u8         a_mac_control_frames_transmitted_high[0x20];
-
-	u8         a_mac_control_frames_transmitted_low[0x20];
-
-	u8         a_mac_control_frames_received_high[0x20];
-
-	u8         a_mac_control_frames_received_low[0x20];
-
-	u8         a_unsupported_opcodes_received_high[0x20];
-
-	u8         a_unsupported_opcodes_received_low[0x20];
-
-	u8         a_pause_mac_ctrl_frames_received_high[0x20];
-
-	u8         a_pause_mac_ctrl_frames_received_low[0x20];
-
-	u8         a_pause_mac_ctrl_frames_transmitted_high[0x20];
-
-	u8         a_pause_mac_ctrl_frames_transmitted_low[0x20];
-
-	u8         reserved_at_4c0[0x300];
-};
-
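Every counter in these PPCNT group layouts is a big-endian 64-bit value split
into _high/_low words; a full counter is read with MLX5_GET64() anchored on
the _high field. A sketch, assuming 'buf' holds this group's payload from an
ACCESS_REG(PPCNT) query:

	static u64 example_frames_tx_ok(void *buf)
	{
		/* mirrors the PPORT_*_GET pattern of the Ethernet stats
		 * code */
		return MLX5_GET64(eth_802_3_cntrs_grp_data_layout, buf,
				  a_frames_transmitted_ok_high);
	}
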
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         rx_errors[0x20];
-
-	u8         tx_errors[0x20];
-
-	u8         l0_to_recovery_eieos[0x20];
-
-	u8         l0_to_recovery_ts[0x20];
-
-	u8         l0_to_recovery_framing[0x20];
-
-	u8         l0_to_recovery_retrain[0x20];
-
-	u8         crc_error_dllp[0x20];
-
-	u8         crc_error_tlp[0x20];
-
-	u8         reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         time_to_boot_image_start[0x20];
-
-	u8         time_to_link_image[0x20];
-
-	u8         calibration_time[0x20];
-
-	u8         time_to_first_perst[0x20];
-
-	u8         time_to_detect_state[0x20];
-
-	u8         time_to_l0[0x20];
-
-	u8         time_to_crs_en[0x20];
-
-	u8         time_to_plastic_image_start[0x20];
-
-	u8         time_to_iron_image_start[0x20];
-
-	u8         perst_handler[0x20];
-
-	u8         times_in_l1[0x20];
-
-	u8         times_in_l23[0x20];
-
-	u8         dl_down[0x20];
-
-	u8         config_cycle1usec[0x20];
-
-	u8         config_cycle2to7usec[0x20];
-
-	u8         config_cycle_8to15usec[0x20];
-
-	u8         config_cycle_16_to_63usec[0x20];
-
-	u8         config_cycle_64usec[0x20];
-
-	u8         correctable_err_msg_sent[0x20];
-
-	u8         non_fatal_err_msg_sent[0x20];
-
-	u8         fatal_err_msg_sent[0x20];
-
-	u8         reserved_at_2e0[0x4e0];
-};
-
-struct mlx5_ifc_cmd_inter_comp_event_bits {
-	u8         command_completion_vector[0x20];
-
-	u8         reserved_at_20[0xc0];
-};
-
-struct mlx5_ifc_stall_vl_event_bits {
-	u8         reserved_at_0[0x18];
-	u8         port_num[0x1];
-	u8         reserved_at_19[0x3];
-	u8         vl[0x4];
-
-	u8         reserved_at_20[0xa0];
-};
-
-struct mlx5_ifc_db_bf_congestion_event_bits {
-	u8         event_subtype[0x8];
-	u8         reserved_at_8[0x8];
-	u8         congestion_level[0x8];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0xa0];
-};
-
-struct mlx5_ifc_gpio_event_bits {
-	u8         reserved_at_0[0x60];
-
-	u8         gpio_event_hi[0x20];
-
-	u8         gpio_event_lo[0x20];
-
-	u8         reserved_at_a0[0x40];
-};
-
-struct mlx5_ifc_port_state_change_event_bits {
-	u8         reserved_at_0[0x40];
-
-	u8         port_num[0x4];
-	u8         reserved_at_44[0x1c];
-
-	u8         reserved_at_60[0x80];
-};
-
-struct mlx5_ifc_dropped_packet_logged_bits {
-	u8         reserved_at_0[0xe0];
-};
-
-enum {
-	MLX5_CQ_ERROR_SYNDROME_CQ_OVERRUN                 = 0x1,
-	MLX5_CQ_ERROR_SYNDROME_CQ_ACCESS_VIOLATION_ERROR  = 0x2,
-};
-
-struct mlx5_ifc_cq_error_bits {
-	u8         reserved_at_0[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x18];
-	u8         syndrome[0x8];
-
-	u8         reserved_at_60[0x80];
-};
-
-struct mlx5_ifc_rdma_page_fault_event_bits {
-	u8         bytes_committed[0x20];
-
-	u8         r_key[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         packet_len[0x10];
-
-	u8         rdma_op_len[0x20];
-
-	u8         rdma_va[0x40];
-
-	u8         reserved_at_c0[0x5];
-	u8         rdma[0x1];
-	u8         write[0x1];
-	u8         requestor[0x1];
-	u8         qp_number[0x18];
-};
-
-struct mlx5_ifc_wqe_associated_page_fault_event_bits {
-	u8         bytes_committed[0x20];
-
-	u8         reserved_at_20[0x10];
-	u8         wqe_index[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         len[0x10];
-
-	u8         reserved_at_60[0x60];
-
-	u8         reserved_at_c0[0x5];
-	u8         rdma[0x1];
-	u8         write_read[0x1];
-	u8         requestor[0x1];
-	u8         qpn[0x18];
-};
-
-struct mlx5_ifc_qp_events_bits {
-	u8         reserved_at_0[0xa0];
-
-	u8         type[0x8];
-	u8         reserved_at_a8[0x18];
-
-	u8         reserved_at_c0[0x8];
-	u8         qpn_rqn_sqn[0x18];
-};
-
-struct mlx5_ifc_dct_events_bits {
-	u8         reserved_at_0[0xc0];
-
-	u8         reserved_at_c0[0x8];
-	u8         dct_number[0x18];
-};
-
-struct mlx5_ifc_comp_event_bits {
-	u8         reserved_at_0[0xc0];
-
-	u8         reserved_at_c0[0x8];
-	u8         cq_number[0x18];
-};
-
-enum {
-	MLX5_QPC_STATE_RST        = 0x0,
-	MLX5_QPC_STATE_INIT       = 0x1,
-	MLX5_QPC_STATE_RTR        = 0x2,
-	MLX5_QPC_STATE_RTS        = 0x3,
-	MLX5_QPC_STATE_SQER       = 0x4,
-	MLX5_QPC_STATE_ERR        = 0x6,
-	MLX5_QPC_STATE_SQD        = 0x7,
-	MLX5_QPC_STATE_SUSPENDED  = 0x9,
-};
-
-enum {
-	MLX5_QPC_ST_RC            = 0x0,
-	MLX5_QPC_ST_UC            = 0x1,
-	MLX5_QPC_ST_UD            = 0x2,
-	MLX5_QPC_ST_XRC           = 0x3,
-	MLX5_QPC_ST_DCI           = 0x5,
-	MLX5_QPC_ST_QP0           = 0x7,
-	MLX5_QPC_ST_QP1           = 0x8,
-	MLX5_QPC_ST_RAW_DATAGRAM  = 0x9,
-	MLX5_QPC_ST_REG_UMR       = 0xc,
-};
-
-enum {
-	MLX5_QPC_PM_STATE_ARMED     = 0x0,
-	MLX5_QPC_PM_STATE_REARM     = 0x1,
-	MLX5_QPC_PM_STATE_RESERVED  = 0x2,
-	MLX5_QPC_PM_STATE_MIGRATED  = 0x3,
-};
-
-enum {
-	MLX5_QPC_END_PADDING_MODE_SCATTER_AS_IS                = 0x0,
-	MLX5_QPC_END_PADDING_MODE_PAD_TO_CACHE_LINE_ALIGNMENT  = 0x1,
-};
-
-enum {
-	MLX5_QPC_MTU_256_BYTES        = 0x1,
-	MLX5_QPC_MTU_512_BYTES        = 0x2,
-	MLX5_QPC_MTU_1K_BYTES         = 0x3,
-	MLX5_QPC_MTU_2K_BYTES         = 0x4,
-	MLX5_QPC_MTU_4K_BYTES         = 0x5,
-	MLX5_QPC_MTU_RAW_ETHERNET_QP  = 0x7,
-};
-
-enum {
-	MLX5_QPC_ATOMIC_MODE_IB_SPEC     = 0x1,
-	MLX5_QPC_ATOMIC_MODE_ONLY_8B     = 0x2,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_8B    = 0x3,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_16B   = 0x4,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_32B   = 0x5,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_64B   = 0x6,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_128B  = 0x7,
-	MLX5_QPC_ATOMIC_MODE_UP_TO_256B  = 0x8,
-};
-
-enum {
-	MLX5_QPC_CS_REQ_DISABLE    = 0x0,
-	MLX5_QPC_CS_REQ_UP_TO_32B  = 0x11,
-	MLX5_QPC_CS_REQ_UP_TO_64B  = 0x22,
-};
-
-enum {
-	MLX5_QPC_CS_RES_DISABLE    = 0x0,
-	MLX5_QPC_CS_RES_UP_TO_32B  = 0x1,
-	MLX5_QPC_CS_RES_UP_TO_64B  = 0x2,
-};
-
-struct mlx5_ifc_qpc_bits {
-	u8         state[0x4];
-	u8         lag_tx_port_affinity[0x4];
-	u8         st[0x8];
-	u8         reserved_at_10[0x3];
-	u8         pm_state[0x2];
-	u8         reserved_at_15[0x7];
-	u8         end_padding_mode[0x2];
-	u8         reserved_at_1e[0x2];
-
-	u8         wq_signature[0x1];
-	u8         block_lb_mc[0x1];
-	u8         atomic_like_write_en[0x1];
-	u8         latency_sensitive[0x1];
-	u8         reserved_at_24[0x1];
-	u8         drain_sigerr[0x1];
-	u8         reserved_at_26[0x2];
-	u8         pd[0x18];
-
-	u8         mtu[0x3];
-	u8         log_msg_max[0x5];
-	u8         reserved_at_48[0x1];
-	u8         log_rq_size[0x4];
-	u8         log_rq_stride[0x3];
-	u8         no_sq[0x1];
-	u8         log_sq_size[0x4];
-	u8         reserved_at_55[0x6];
-	u8         rlky[0x1];
-	u8         ulp_stateless_offload_mode[0x4];
-
-	u8         counter_set_id[0x8];
-	u8         uar_page[0x18];
-
-	u8         reserved_at_80[0x8];
-	u8         user_index[0x18];
-
-	u8         reserved_at_a0[0x3];
-	u8         log_page_size[0x5];
-	u8         remote_qpn[0x18];
-
-	struct mlx5_ifc_ads_bits primary_address_path;
-
-	struct mlx5_ifc_ads_bits secondary_address_path;
-
-	u8         log_ack_req_freq[0x4];
-	u8         reserved_at_384[0x4];
-	u8         log_sra_max[0x3];
-	u8         reserved_at_38b[0x2];
-	u8         retry_count[0x3];
-	u8         rnr_retry[0x3];
-	u8         reserved_at_393[0x1];
-	u8         fre[0x1];
-	u8         cur_rnr_retry[0x3];
-	u8         cur_retry_count[0x3];
-	u8         reserved_at_39b[0x5];
-
-	u8         reserved_at_3a0[0x20];
-
-	u8         reserved_at_3c0[0x8];
-	u8         next_send_psn[0x18];
-
-	u8         reserved_at_3e0[0x8];
-	u8         cqn_snd[0x18];
-
-	u8         reserved_at_400[0x8];
-	u8         deth_sqpn[0x18];
-
-	u8         reserved_at_420[0x20];
-
-	u8         reserved_at_440[0x8];
-	u8         last_acked_psn[0x18];
-
-	u8         reserved_at_460[0x8];
-	u8         ssn[0x18];
-
-	u8         reserved_at_480[0x8];
-	u8         log_rra_max[0x3];
-	u8         reserved_at_48b[0x1];
-	u8         atomic_mode[0x4];
-	u8         rre[0x1];
-	u8         rwe[0x1];
-	u8         rae[0x1];
-	u8         reserved_at_493[0x1];
-	u8         page_offset[0x6];
-	u8         reserved_at_49a[0x3];
-	u8         cd_slave_receive[0x1];
-	u8         cd_slave_send[0x1];
-	u8         cd_master[0x1];
-
-	u8         reserved_at_4a0[0x3];
-	u8         min_rnr_nak[0x5];
-	u8         next_rcv_psn[0x18];
-
-	u8         reserved_at_4c0[0x8];
-	u8         xrcd[0x18];
-
-	u8         reserved_at_4e0[0x8];
-	u8         cqn_rcv[0x18];
-
-	u8         dbr_addr[0x40];
-
-	u8         q_key[0x20];
-
-	u8         reserved_at_560[0x5];
-	u8         rq_type[0x3];
-	u8         srqn_rmpn_xrqn[0x18];
-
-	u8         reserved_at_580[0x8];
-	u8         rmsn[0x18];
-
-	u8         hw_sq_wqebb_counter[0x10];
-	u8         sw_sq_wqebb_counter[0x10];
-
-	u8         hw_rq_counter[0x20];
-
-	u8         sw_rq_counter[0x20];
-
-	u8         reserved_at_600[0x20];
-
-	u8         reserved_at_620[0xf];
-	u8         cgs[0x1];
-	u8         cs_req[0x8];
-	u8         cs_res[0x8];
-
-	u8         dc_access_key[0x40];
-
-	u8         reserved_at_680[0xc0];
-};
-
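qpc is written with the same accessors when building create/modify QP
mailboxes. A hedged sketch of a few representative assignments; the pointer
and values are assumptions for illustration, not lifted from current driver
code:

	static void example_fill_qpc(void *qpc, u32 pdn, u32 cqn)
	{
		MLX5_SET(qpc, qpc, st,       MLX5_QPC_ST_RC);
		MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
		MLX5_SET(qpc, qpc, pd,       pdn);
		MLX5_SET(qpc, qpc, cqn_snd,  cqn);
		MLX5_SET(qpc, qpc, cqn_rcv,  cqn);
	}
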
-struct mlx5_ifc_roce_addr_layout_bits {
-	u8         source_l3_address[16][0x8];
-
-	u8         reserved_at_80[0x3];
-	u8         vlan_valid[0x1];
-	u8         vlan_id[0xc];
-	u8         source_mac_47_32[0x10];
-
-	u8         source_mac_31_0[0x20];
-
-	u8         reserved_at_c0[0x14];
-	u8         roce_l3_type[0x4];
-	u8         roce_version[0x8];
-
-	u8         reserved_at_e0[0x20];
-};
-
-union mlx5_ifc_hca_cap_union_bits {
-	struct mlx5_ifc_cmd_hca_cap_bits cmd_hca_cap;
-	struct mlx5_ifc_odp_cap_bits odp_cap;
-	struct mlx5_ifc_atomic_caps_bits atomic_caps;
-	struct mlx5_ifc_roce_cap_bits roce_cap;
-	struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps;
-	struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
-	struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
-	struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
-	struct mlx5_ifc_vector_calc_cap_bits vector_calc_cap;
-	struct mlx5_ifc_qos_cap_bits qos_cap;
-	u8         reserved_at_0[0x8000];
-};
-
-enum {
-	MLX5_FLOW_CONTEXT_ACTION_ALLOW     = 0x1,
-	MLX5_FLOW_CONTEXT_ACTION_DROP      = 0x2,
-	MLX5_FLOW_CONTEXT_ACTION_FWD_DEST  = 0x4,
-	MLX5_FLOW_CONTEXT_ACTION_COUNT     = 0x8,
-	MLX5_FLOW_CONTEXT_ACTION_ENCAP     = 0x10,
-	MLX5_FLOW_CONTEXT_ACTION_DECAP     = 0x20,
-};
-
-struct mlx5_ifc_flow_context_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         group_id[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         flow_tag[0x18];
-
-	u8         reserved_at_60[0x10];
-	u8         action[0x10];
-
-	u8         reserved_at_80[0x8];
-	u8         destination_list_size[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         flow_counter_list_size[0x18];
-
-	u8         encap_id[0x20];
-
-	u8         reserved_at_e0[0x120];
-
-	struct mlx5_ifc_fte_match_param_bits match_value;
-
-	u8         reserved_at_1200[0x600];
-
-	union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
-};
-
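flow_context pairs the ACTION bitmask above with a trailing destination
array, sized by destination_list_size, when FWD_DEST is set. A sketch of a
forward-and-count rule; 'fc' is assumed to point at the in_flow_context of a
SET_FLOW_TABLE_ENTRY command:

	static void example_fwd_and_count(void *fc, u32 group_id)
	{
		MLX5_SET(flow_context, fc, group_id, group_id);
		MLX5_SET(flow_context, fc, action,
			 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			 MLX5_FLOW_CONTEXT_ACTION_COUNT);
		MLX5_SET(flow_context, fc, destination_list_size, 1);
		/* one dest_format_struct entry would follow in
		 * destination[0] */
	}
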
-enum {
-	MLX5_XRC_SRQC_STATE_GOOD   = 0x0,
-	MLX5_XRC_SRQC_STATE_ERROR  = 0x1,
-};
-
-struct mlx5_ifc_xrc_srqc_bits {
-	u8         state[0x4];
-	u8         log_xrc_srq_size[0x4];
-	u8         reserved_at_8[0x18];
-
-	u8         wq_signature[0x1];
-	u8         cont_srq[0x1];
-	u8         reserved_at_22[0x1];
-	u8         rlky[0x1];
-	u8         basic_cyclic_rcv_wqe[0x1];
-	u8         log_rq_stride[0x3];
-	u8         xrcd[0x18];
-
-	u8         page_offset[0x6];
-	u8         reserved_at_46[0x2];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         user_index_equal_xrc_srqn[0x1];
-	u8         reserved_at_81[0x1];
-	u8         log_page_size[0x6];
-	u8         user_index[0x18];
-
-	u8         reserved_at_a0[0x20];
-
-	u8         reserved_at_c0[0x8];
-	u8         pd[0x18];
-
-	u8         lwm[0x10];
-	u8         wqe_cnt[0x10];
-
-	u8         reserved_at_100[0x40];
-
-	u8         db_record_addr_h[0x20];
-
-	u8         db_record_addr_l[0x1e];
-	u8         reserved_at_17e[0x2];
-
-	u8         reserved_at_180[0x80];
-};
-
-struct mlx5_ifc_traffic_counter_bits {
-	u8         packets[0x40];
-
-	u8         octets[0x40];
-};
-
-struct mlx5_ifc_tisc_bits {
-	u8         strict_lag_tx_port_affinity[0x1];
-	u8         reserved_at_1[0x3];
-	u8         lag_tx_port_affinity[0x04];
-
-	u8         reserved_at_8[0x4];
-	u8         prio[0x4];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x100];
-
-	u8         reserved_at_120[0x8];
-	u8         transport_domain[0x18];
-
-	u8         reserved_at_140[0x3c0];
-};
-
-enum {
-	MLX5_TIRC_DISP_TYPE_DIRECT    = 0x0,
-	MLX5_TIRC_DISP_TYPE_INDIRECT  = 0x1,
-};
-
-enum {
-	MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO  = 0x1,
-	MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO  = 0x2,
-};
-
-enum {
-	MLX5_RX_HASH_FN_NONE           = 0x0,
-	MLX5_RX_HASH_FN_INVERTED_XOR8  = 0x1,
-	MLX5_RX_HASH_FN_TOEPLITZ       = 0x2,
-};
-
-enum {
-	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_    = 0x1,
-	MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST_  = 0x2,
-};
-
-struct mlx5_ifc_tirc_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         disp_type[0x4];
-	u8         reserved_at_24[0x1c];
-
-	u8         reserved_at_40[0x40];
-
-	u8         reserved_at_80[0x4];
-	u8         lro_timeout_period_usecs[0x10];
-	u8         lro_enable_mask[0x4];
-	u8         lro_max_ip_payload_size[0x8];
-
-	u8         reserved_at_a0[0x40];
-
-	u8         reserved_at_e0[0x8];
-	u8         inline_rqn[0x18];
-
-	u8         rx_hash_symmetric[0x1];
-	u8         reserved_at_101[0x1];
-	u8         tunneled_offload_en[0x1];
-	u8         reserved_at_103[0x5];
-	u8         indirect_table[0x18];
-
-	u8         rx_hash_fn[0x4];
-	u8         reserved_at_124[0x2];
-	u8         self_lb_block[0x2];
-	u8         transport_domain[0x18];
-
-	u8         rx_hash_toeplitz_key[10][0x20];
-
-	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_outer;
-
-	struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
-
-	u8         reserved_at_2c0[0x4c0];
-};
-
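An indirectly-dispatching TIR with Toeplitz RSS sets roughly the following;
the rx_hash_toeplitz_key copy is elided and 'rqtn'/'tdn' are assumed object
numbers:

	static void example_fill_rss_tir(void *tirc, u32 rqtn, u32 tdn)
	{
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table,   rqtn);
		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
		MLX5_SET(tirc, tirc, transport_domain, tdn);
	}
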
-enum {
-	MLX5_SRQC_STATE_GOOD   = 0x0,
-	MLX5_SRQC_STATE_ERROR  = 0x1,
-};
-
-struct mlx5_ifc_srqc_bits {
-	u8         state[0x4];
-	u8         log_srq_size[0x4];
-	u8         reserved_at_8[0x18];
-
-	u8         wq_signature[0x1];
-	u8         cont_srq[0x1];
-	u8         reserved_at_22[0x1];
-	u8         rlky[0x1];
-	u8         reserved_at_24[0x1];
-	u8         log_rq_stride[0x3];
-	u8         xrcd[0x18];
-
-	u8         page_offset[0x6];
-	u8         reserved_at_46[0x2];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         reserved_at_80[0x2];
-	u8         log_page_size[0x6];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x20];
-
-	u8         reserved_at_c0[0x8];
-	u8         pd[0x18];
-
-	u8         lwm[0x10];
-	u8         wqe_cnt[0x10];
-
-	u8         reserved_at_100[0x40];
-
-	u8         dbr_addr[0x40];
-
-	u8         reserved_at_180[0x80];
-};
-
-enum {
-	MLX5_SQC_STATE_RST  = 0x0,
-	MLX5_SQC_STATE_RDY  = 0x1,
-	MLX5_SQC_STATE_ERR  = 0x3,
-};
-
-struct mlx5_ifc_sqc_bits {
-	u8         rlky[0x1];
-	u8         cd_master[0x1];
-	u8         fre[0x1];
-	u8         flush_in_error_en[0x1];
-	u8         reserved_at_4[0x1];
-	u8	   min_wqe_inline_mode[0x3];
-	u8         state[0x4];
-	u8         reg_umr[0x1];
-	u8         reserved_at_d[0x13];
-
-	u8         reserved_at_20[0x8];
-	u8         user_index[0x18];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x90];
-
-	u8         packet_pacing_rate_limit_index[0x10];
-	u8         tis_lst_sz[0x10];
-	u8         reserved_at_110[0x10];
-
-	u8         reserved_at_120[0x40];
-
-	u8         reserved_at_160[0x8];
-	u8         tis_num_0[0x18];
-
-	struct mlx5_ifc_wq_bits wq;
-};
-
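And an Ethernet send queue context, created in RST and moved to RDY later via
MODIFY_SQ. A sketch; 'sqc' is assumed to point at the sq context inside a
CREATE_SQ mailbox:

	static void example_fill_sqc(void *sqc, u32 tisn, u32 cqn)
	{
		MLX5_SET(sqc, sqc, tis_lst_sz,        1);
		MLX5_SET(sqc, sqc, tis_num_0,         tisn);
		MLX5_SET(sqc, sqc, cqn,               cqn);
		MLX5_SET(sqc, sqc, state,             MLX5_SQC_STATE_RST);
		MLX5_SET(sqc, sqc, flush_in_error_en, 1);
	}
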
-enum {
-	SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR = 0x0,
-	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT = 0x1,
-	SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC = 0x2,
-	SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
-};
-
-struct mlx5_ifc_scheduling_context_bits {
-	u8         element_type[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         element_attributes[0x20];
-
-	u8         parent_element_id[0x20];
-
-	u8         reserved_at_60[0x40];
-
-	u8         bw_share[0x20];
-
-	u8         max_average_bw[0x20];
-
-	u8         reserved_at_e0[0x120];
-};
-
-struct mlx5_ifc_rqtc_bits {
-	u8         reserved_at_0[0xa0];
-
-	u8         reserved_at_a0[0x10];
-	u8         rqt_max_size[0x10];
-
-	u8         reserved_at_c0[0x10];
-	u8         rqt_actual_size[0x10];
-
-	u8         reserved_at_e0[0x6a0];
-
-	struct mlx5_ifc_rq_num_bits rq_num[0];
-};
-
-enum {
-	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
-	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
-};
-
-enum {
-	MLX5_RQC_STATE_RST  = 0x0,
-	MLX5_RQC_STATE_RDY  = 0x1,
-	MLX5_RQC_STATE_ERR  = 0x3,
-};
-
-struct mlx5_ifc_rqc_bits {
-	u8         rlky[0x1];
-	u8         reserved_at_1[0x1];
-	u8         scatter_fcs[0x1];
-	u8         vsd[0x1];
-	u8         mem_rq_type[0x4];
-	u8         state[0x4];
-	u8         reserved_at_c[0x1];
-	u8         flush_in_error_en[0x1];
-	u8         reserved_at_e[0x12];
-
-	u8         reserved_at_20[0x8];
-	u8         user_index[0x18];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         counter_set_id[0x8];
-	u8         reserved_at_68[0x18];
-
-	u8         reserved_at_80[0x8];
-	u8         rmpn[0x18];
-
-	u8         reserved_at_a0[0xe0];
-
-	struct mlx5_ifc_wq_bits wq;
-};
-
-enum {
-	MLX5_RMPC_STATE_RDY  = 0x1,
-	MLX5_RMPC_STATE_ERR  = 0x3,
-};
-
-struct mlx5_ifc_rmpc_bits {
-	u8         reserved_at_0[0x8];
-	u8         state[0x4];
-	u8         reserved_at_c[0x14];
-
-	u8         basic_cyclic_rcv_wqe[0x1];
-	u8         reserved_at_21[0x1f];
-
-	u8         reserved_at_40[0x140];
-
-	struct mlx5_ifc_wq_bits wq;
-};
-
-struct mlx5_ifc_nic_vport_context_bits {
-	u8         reserved_at_0[0x5];
-	u8         min_wqe_inline_mode[0x3];
-	u8         reserved_at_8[0x17];
-	u8         roce_en[0x1];
-
-	u8         arm_change_event[0x1];
-	u8         reserved_at_21[0x1a];
-	u8         event_on_mtu[0x1];
-	u8         event_on_promisc_change[0x1];
-	u8         event_on_vlan_change[0x1];
-	u8         event_on_mc_address_change[0x1];
-	u8         event_on_uc_address_change[0x1];
-
-	u8         reserved_at_40[0xf0];
-
-	u8         mtu[0x10];
-
-	u8         system_image_guid[0x40];
-	u8         port_guid[0x40];
-	u8         node_guid[0x40];
-
-	u8         reserved_at_200[0x140];
-	u8         qkey_violation_counter[0x10];
-	u8         reserved_at_350[0x430];
-
-	u8         promisc_uc[0x1];
-	u8         promisc_mc[0x1];
-	u8         promisc_all[0x1];
-	u8         reserved_at_783[0x2];
-	u8         allowed_list_type[0x3];
-	u8         reserved_at_788[0xc];
-	u8         allowed_list_size[0xc];
-
-	struct mlx5_ifc_mac_address_layout_bits permanent_address;
-
-	u8         reserved_at_7e0[0x20];
-
-	u8         current_uc_mac_address[0][0x40];
-};
-
-enum {
-	MLX5_MKC_ACCESS_MODE_PA    = 0x0,
-	MLX5_MKC_ACCESS_MODE_MTT   = 0x1,
-	MLX5_MKC_ACCESS_MODE_KLMS  = 0x2,
-	MLX5_MKC_ACCESS_MODE_KSM   = 0x3,
-};
-
-struct mlx5_ifc_mkc_bits {
-	u8         reserved_at_0[0x1];
-	u8         free[0x1];
-	u8         reserved_at_2[0xd];
-	u8         small_fence_on_rdma_read_response[0x1];
-	u8         umr_en[0x1];
-	u8         a[0x1];
-	u8         rw[0x1];
-	u8         rr[0x1];
-	u8         lw[0x1];
-	u8         lr[0x1];
-	u8         access_mode[0x2];
-	u8         reserved_at_18[0x8];
-
-	u8         qpn[0x18];
-	u8         mkey_7_0[0x8];
-
-	u8         reserved_at_40[0x20];
-
-	u8         length64[0x1];
-	u8         bsf_en[0x1];
-	u8         sync_umr[0x1];
-	u8         reserved_at_63[0x2];
-	u8         expected_sigerr_count[0x1];
-	u8         reserved_at_66[0x1];
-	u8         en_rinval[0x1];
-	u8         pd[0x18];
-
-	u8         start_addr[0x40];
-
-	u8         len[0x40];
-
-	u8         bsf_octword_size[0x20];
-
-	u8         reserved_at_120[0x80];
-
-	u8         translations_octword_size[0x20];
-
-	u8         reserved_at_1c0[0x1b];
-	u8         log_page_size[0x5];
-
-	u8         reserved_at_1e0[0x20];
-};
-
-struct mlx5_ifc_pkey_bits {
-	u8         reserved_at_0[0x10];
-	u8         pkey[0x10];
-};
-
-struct mlx5_ifc_array128_auto_bits {
-	u8         array128_auto[16][0x8];
-};
-
-struct mlx5_ifc_hca_vport_context_bits {
-	u8         field_select[0x20];
-
-	u8         reserved_at_20[0xe0];
-
-	u8         sm_virt_aware[0x1];
-	u8         has_smi[0x1];
-	u8         has_raw[0x1];
-	u8         grh_required[0x1];
-	u8         reserved_at_104[0xc];
-	u8         port_physical_state[0x4];
-	u8         vport_state_policy[0x4];
-	u8         port_state[0x4];
-	u8         vport_state[0x4];
-
-	u8         reserved_at_120[0x20];
-
-	u8         system_image_guid[0x40];
-
-	u8         port_guid[0x40];
-
-	u8         node_guid[0x40];
-
-	u8         cap_mask1[0x20];
-
-	u8         cap_mask1_field_select[0x20];
-
-	u8         cap_mask2[0x20];
-
-	u8         cap_mask2_field_select[0x20];
-
-	u8         reserved_at_280[0x80];
-
-	u8         lid[0x10];
-	u8         reserved_at_310[0x4];
-	u8         init_type_reply[0x4];
-	u8         lmc[0x3];
-	u8         subnet_timeout[0x5];
-
-	u8         sm_lid[0x10];
-	u8         sm_sl[0x4];
-	u8         reserved_at_334[0xc];
-
-	u8         qkey_violation_counter[0x10];
-	u8         pkey_violation_counter[0x10];
-
-	u8         reserved_at_360[0xca0];
-};
-
-struct mlx5_ifc_esw_vport_context_bits {
-	u8         reserved_at_0[0x3];
-	u8         vport_svlan_strip[0x1];
-	u8         vport_cvlan_strip[0x1];
-	u8         vport_svlan_insert[0x1];
-	u8         vport_cvlan_insert[0x2];
-	u8         reserved_at_8[0x18];
-
-	u8         reserved_at_20[0x20];
-
-	u8         svlan_cfi[0x1];
-	u8         svlan_pcp[0x3];
-	u8         svlan_id[0xc];
-	u8         cvlan_cfi[0x1];
-	u8         cvlan_pcp[0x3];
-	u8         cvlan_id[0xc];
-
-	u8         reserved_at_60[0x7a0];
-};
-
-enum {
-	MLX5_EQC_STATUS_OK                = 0x0,
-	MLX5_EQC_STATUS_EQ_WRITE_FAILURE  = 0xa,
-};
-
-enum {
-	MLX5_EQC_ST_ARMED  = 0x9,
-	MLX5_EQC_ST_FIRED  = 0xa,
-};
-
-struct mlx5_ifc_eqc_bits {
-	u8         status[0x4];
-	u8         reserved_at_4[0x9];
-	u8         ec[0x1];
-	u8         oi[0x1];
-	u8         reserved_at_f[0x5];
-	u8         st[0x4];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x14];
-	u8         page_offset[0x6];
-	u8         reserved_at_5a[0x6];
-
-	u8         reserved_at_60[0x3];
-	u8         log_eq_size[0x5];
-	u8         uar_page[0x18];
-
-	u8         reserved_at_80[0x20];
-
-	u8         reserved_at_a0[0x18];
-	u8         intr[0x8];
-
-	u8         reserved_at_c0[0x3];
-	u8         log_page_size[0x5];
-	u8         reserved_at_c8[0x18];
-
-	u8         reserved_at_e0[0x60];
-
-	u8         reserved_at_140[0x8];
-	u8         consumer_counter[0x18];
-
-	u8         reserved_at_160[0x8];
-	u8         producer_counter[0x18];
-
-	u8         reserved_at_180[0x80];
-};
-
-enum {
-	MLX5_DCTC_STATE_ACTIVE    = 0x0,
-	MLX5_DCTC_STATE_DRAINING  = 0x1,
-	MLX5_DCTC_STATE_DRAINED   = 0x2,
-};
-
-enum {
-	MLX5_DCTC_CS_RES_DISABLE    = 0x0,
-	MLX5_DCTC_CS_RES_NA         = 0x1,
-	MLX5_DCTC_CS_RES_UP_TO_64B  = 0x2,
-};
-
-enum {
-	MLX5_DCTC_MTU_256_BYTES  = 0x1,
-	MLX5_DCTC_MTU_512_BYTES  = 0x2,
-	MLX5_DCTC_MTU_1K_BYTES   = 0x3,
-	MLX5_DCTC_MTU_2K_BYTES   = 0x4,
-	MLX5_DCTC_MTU_4K_BYTES   = 0x5,
-};
-
-struct mlx5_ifc_dctc_bits {
-	u8         reserved_at_0[0x4];
-	u8         state[0x4];
-	u8         reserved_at_8[0x18];
-
-	u8         reserved_at_20[0x8];
-	u8         user_index[0x18];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         counter_set_id[0x8];
-	u8         atomic_mode[0x4];
-	u8         rre[0x1];
-	u8         rwe[0x1];
-	u8         rae[0x1];
-	u8         atomic_like_write_en[0x1];
-	u8         latency_sensitive[0x1];
-	u8         rlky[0x1];
-	u8         free_ar[0x1];
-	u8         reserved_at_73[0xd];
-
-	u8         reserved_at_80[0x8];
-	u8         cs_res[0x8];
-	u8         reserved_at_90[0x3];
-	u8         min_rnr_nak[0x5];
-	u8         reserved_at_98[0x8];
-
-	u8         reserved_at_a0[0x8];
-	u8         srqn_xrqn[0x18];
-
-	u8         reserved_at_c0[0x8];
-	u8         pd[0x18];
-
-	u8         tclass[0x8];
-	u8         reserved_at_e8[0x4];
-	u8         flow_label[0x14];
-
-	u8         dc_access_key[0x40];
-
-	u8         reserved_at_140[0x5];
-	u8         mtu[0x3];
-	u8         port[0x8];
-	u8         pkey_index[0x10];
-
-	u8         reserved_at_160[0x8];
-	u8         my_addr_index[0x8];
-	u8         reserved_at_170[0x8];
-	u8         hop_limit[0x8];
-
-	u8         dc_access_key_violation_count[0x20];
-
-	u8         reserved_at_1a0[0x14];
-	u8         dei_cfi[0x1];
-	u8         eth_prio[0x3];
-	u8         ecn[0x2];
-	u8         dscp[0x6];
-
-	u8         reserved_at_1c0[0x40];
-};
-
-enum {
-	MLX5_CQC_STATUS_OK             = 0x0,
-	MLX5_CQC_STATUS_CQ_OVERFLOW    = 0x9,
-	MLX5_CQC_STATUS_CQ_WRITE_FAIL  = 0xa,
-};
-
-enum {
-	MLX5_CQC_CQE_SZ_64_BYTES   = 0x0,
-	MLX5_CQC_CQE_SZ_128_BYTES  = 0x1,
-};
-
-enum {
-	MLX5_CQC_ST_SOLICITED_NOTIFICATION_REQUEST_ARMED  = 0x6,
-	MLX5_CQC_ST_NOTIFICATION_REQUEST_ARMED            = 0x9,
-	MLX5_CQC_ST_FIRED                                 = 0xa,
-};
-
-enum {
-	MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
-	MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
-	MLX5_CQ_PERIOD_NUM_MODES
-};
-
-struct mlx5_ifc_cqc_bits {
-	u8         status[0x4];
-	u8         reserved_at_4[0x4];
-	u8         cqe_sz[0x3];
-	u8         cc[0x1];
-	u8         reserved_at_c[0x1];
-	u8         scqe_break_moderation_en[0x1];
-	u8         oi[0x1];
-	u8         cq_period_mode[0x2];
-	u8         cqe_comp_en[0x1];
-	u8         mini_cqe_res_format[0x2];
-	u8         st[0x4];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x14];
-	u8         page_offset[0x6];
-	u8         reserved_at_5a[0x6];
-
-	u8         reserved_at_60[0x3];
-	u8         log_cq_size[0x5];
-	u8         uar_page[0x18];
-
-	u8         reserved_at_80[0x4];
-	u8         cq_period[0xc];
-	u8         cq_max_count[0x10];
-
-	u8         reserved_at_a0[0x18];
-	u8         c_eqn[0x8];
-
-	u8         reserved_at_c0[0x3];
-	u8         log_page_size[0x5];
-	u8         reserved_at_c8[0x18];
-
-	u8         reserved_at_e0[0x20];
-
-	u8         reserved_at_100[0x8];
-	u8         last_notified_index[0x18];
-
-	u8         reserved_at_120[0x8];
-	u8         last_solicit_index[0x18];
-
-	u8         reserved_at_140[0x8];
-	u8         consumer_counter[0x18];
-
-	u8         reserved_at_160[0x8];
-	u8         producer_counter[0x18];
-
-	u8         reserved_at_180[0x40];
-
-	u8         dbr_addr[0x40];
-};
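
The cq_period/cq_max_count/cq_period_mode fields above are what CQ interrupt moderation boils down to. A hedged sketch of programming them at CQ-create time (cqc points into a CREATE_CQ input buffer; the numbers are arbitrary examples, and the driver programs cq_period in microseconds):

static void example_set_cq_moderation(void *cqc)
{
	/* fire an event after 64 completions or 16 usec,
	 * whichever comes first, counting from the last EQE
	 */
	MLX5_SET(cqc, cqc, cq_period, 16);
	MLX5_SET(cqc, cqc, cq_max_count, 64);
	MLX5_SET(cqc, cqc, cq_period_mode,
		 MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
}
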
-
-union mlx5_ifc_cong_control_roce_ecn_auto_bits {
-	struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
-	struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
-	struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
-	u8         reserved_at_0[0x800];
-};
-
-struct mlx5_ifc_query_adapter_param_block_bits {
-	u8         reserved_at_0[0xc0];
-
-	u8         reserved_at_c0[0x8];
-	u8         ieee_vendor_id[0x18];
-
-	u8         reserved_at_e0[0x10];
-	u8         vsd_vendor_id[0x10];
-
-	u8         vsd[208][0x8];
-
-	u8         vsd_contd_psid[16][0x8];
-};
-
-enum {
-	MLX5_XRQC_STATE_GOOD   = 0x0,
-	MLX5_XRQC_STATE_ERROR  = 0x1,
-};
-
-enum {
-	MLX5_XRQC_TOPOLOGY_NO_SPECIAL_TOPOLOGY = 0x0,
-	MLX5_XRQC_TOPOLOGY_TAG_MATCHING        = 0x1,
-};
-
-enum {
-	MLX5_XRQC_OFFLOAD_RNDV = 0x1,
-};
-
-struct mlx5_ifc_tag_matching_topology_context_bits {
-	u8         log_matching_list_sz[0x4];
-	u8         reserved_at_4[0xc];
-	u8         append_next_index[0x10];
-
-	u8         sw_phase_cnt[0x10];
-	u8         hw_phase_cnt[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_xrqc_bits {
-	u8         state[0x4];
-	u8         rlkey[0x1];
-	u8         reserved_at_5[0xf];
-	u8         topology[0x4];
-	u8         reserved_at_18[0x4];
-	u8         offload[0x4];
-
-	u8         reserved_at_20[0x8];
-	u8         user_index[0x18];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0xa0];
-
-	struct mlx5_ifc_tag_matching_topology_context_bits tag_matching_topology_context;
-
-	u8         reserved_at_180[0x880];
-
-	struct mlx5_ifc_wq_bits wq;
-};
-
-union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
-	struct mlx5_ifc_modify_field_select_bits modify_field_select;
-	struct mlx5_ifc_resize_field_select_bits resize_field_select;
-	u8         reserved_at_0[0x20];
-};
-
-union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
-	struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
-	struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
-	struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
-	u8         reserved_at_0[0x20];
-};
-
-union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
-	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
-	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
-	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
-	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
-	u8         reserved_at_0[0x7c0];
-};
-
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
-	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
-	struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
-	u8         reserved_at_0[0x7c0];
-};
-
-union mlx5_ifc_event_auto_bits {
-	struct mlx5_ifc_comp_event_bits comp_event;
-	struct mlx5_ifc_dct_events_bits dct_events;
-	struct mlx5_ifc_qp_events_bits qp_events;
-	struct mlx5_ifc_wqe_associated_page_fault_event_bits wqe_associated_page_fault_event;
-	struct mlx5_ifc_rdma_page_fault_event_bits rdma_page_fault_event;
-	struct mlx5_ifc_cq_error_bits cq_error;
-	struct mlx5_ifc_dropped_packet_logged_bits dropped_packet_logged;
-	struct mlx5_ifc_port_state_change_event_bits port_state_change_event;
-	struct mlx5_ifc_gpio_event_bits gpio_event;
-	struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
-	struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
-	struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
-	u8         reserved_at_0[0xe0];
-};
-
-struct mlx5_ifc_health_buffer_bits {
-	u8         reserved_at_0[0x100];
-
-	u8         assert_existptr[0x20];
-
-	u8         assert_callra[0x20];
-
-	u8         reserved_at_140[0x40];
-
-	u8         fw_version[0x20];
-
-	u8         hw_id[0x20];
-
-	u8         reserved_at_1c0[0x20];
-
-	u8         irisc_index[0x8];
-	u8         synd[0x8];
-	u8         ext_synd[0x10];
-};
-
-struct mlx5_ifc_register_loopback_control_bits {
-	u8         no_lb[0x1];
-	u8         reserved_at_1[0x7];
-	u8         port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_vport_tc_element_bits {
-	u8         traffic_class[0x4];
-	u8         reserved_at_4[0xc];
-	u8         vport_number[0x10];
-};
-
-struct mlx5_ifc_vport_element_bits {
-	u8         reserved_at_0[0x10];
-	u8         vport_number[0x10];
-};
-
-enum {
-	TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0,
-	TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1,
-	TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2,
-};
-
-struct mlx5_ifc_tsar_element_bits {
-	u8         reserved_at_0[0x8];
-	u8         tsar_type[0x8];
-	u8         reserved_at_10[0x10];
-};
-
-struct mlx5_ifc_teardown_hca_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE  = 0x0,
-	MLX5_TEARDOWN_HCA_IN_PROFILE_PANIC_CLOSE     = 0x1,
-};
-
-struct mlx5_ifc_teardown_hca_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         profile[0x10];
-
-	u8         reserved_at_60[0x20];
-};
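
These in/out layouts pair with the generic command interface: build the input with MLX5_SET(), pass both buffers to mlx5_cmd_exec(). A minimal sketch of issuing TEARDOWN_HCA, mirroring the pattern used throughout the core driver (function name is illustrative):

static int example_teardown_hca(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {0};

	MLX5_SET(teardown_hca_in, in, opcode, MLX5_CMD_OP_TEARDOWN_HCA);
	MLX5_SET(teardown_hca_in, in, profile,
		 MLX5_TEARDOWN_HCA_IN_PROFILE_GRACEFUL_CLOSE);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
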
-
-struct mlx5_ifc_sqerr2rts_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_sqerr2rts_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_sqd2rts_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_sqd2rts_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_set_roce_address_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_roce_address_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         roce_address_index[0x10];
-	u8         reserved_at_50[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_roce_addr_layout_bits roce_address;
-};
-
-struct mlx5_ifc_set_mad_demux_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_PASS_ALL   = 0x0,
-	MLX5_SET_MAD_DEMUX_IN_DEMUX_MODE_SELECTIVE  = 0x2,
-};
-
-struct mlx5_ifc_set_mad_demux_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x20];
-
-	u8         reserved_at_60[0x6];
-	u8         demux_mode[0x2];
-	u8         reserved_at_68[0x18];
-};
-
-struct mlx5_ifc_set_l2_table_entry_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_l2_table_entry_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x60];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_index[0x18];
-
-	u8         reserved_at_c0[0x20];
-
-	u8         reserved_at_e0[0x13];
-	u8         vlan_valid[0x1];
-	u8         vlan[0xc];
-
-	struct mlx5_ifc_mac_address_layout_bits mac_address;
-
-	u8         reserved_at_140[0xc0];
-};
-
-struct mlx5_ifc_set_issi_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_issi_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         current_issi[0x10];
-
-	u8         reserved_at_60[0x20];
-};
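
The SET_ISSI layout above is used once at init time to pin the command-interface revision the driver will speak. A rough sketch, assuming ISSI 1 as the target; the real negotiation in the core driver first issues QUERY_ISSI (further down in this file) and parses supported_issi_dw0:

static int example_set_issi(struct mlx5_core_dev *dev)
{
	u32 in[MLX5_ST_SZ_DW(set_issi_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(set_issi_out)] = {0};

	MLX5_SET(set_issi_in, in, opcode, MLX5_CMD_OP_SET_ISSI);
	MLX5_SET(set_issi_in, in, current_issi, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
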
-
-struct mlx5_ifc_set_hca_cap_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_hca_cap_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	union mlx5_ifc_hca_cap_union_bits capability;
-};
-
-enum {
-	MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION            = 0x0,
-	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_TAG          = 0x1,
-	MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST  = 0x2,
-	MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS     = 0x3,
-};
-
-struct mlx5_ifc_set_fte_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_fte_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x18];
-	u8         modify_enable_mask[0x8];
-
-	u8         reserved_at_e0[0x20];
-
-	u8         flow_index[0x20];
-
-	u8         reserved_at_120[0xe0];
-
-	struct mlx5_ifc_flow_context_bits flow_context;
-};
-
-struct mlx5_ifc_rts2rts_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_rts2rts_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_rtr2rts_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_rtr2rts_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_rst2init_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_rst2init_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_query_xrq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_xrqc_bits xrq_context;
-};
-
-struct mlx5_ifc_query_xrq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_xrc_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_query_xrc_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrc_srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-enum {
-	MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN  = 0x0,
-	MLX5_QUERY_VPORT_STATE_OUT_STATE_UP    = 0x1,
-};
-
-struct mlx5_ifc_query_vport_state_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x20];
-
-	u8         reserved_at_60[0x18];
-	u8         admin_state[0x4];
-	u8         state[0x4];
-};
-
-enum {
-	MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT  = 0x0,
-	MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT   = 0x1,
-};
-
-struct mlx5_ifc_query_vport_state_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-};
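
A hedged sketch of driving the QUERY_VPORT_STATE layouts above, equivalent in spirit to the driver's vport query helper; op_mod selects vnic versus e-switch vport per the enum, and the own-vport case leaves other_vport/vport_number at zero:

static u8 example_query_vport_state(struct mlx5_core_dev *dev, u16 opmod)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);

	if (mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)))
		return MLX5_QUERY_VPORT_STATE_OUT_STATE_DOWN;

	return MLX5_GET(query_vport_state_out, out, state);
}
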
-
-struct mlx5_ifc_query_vport_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_traffic_counter_bits received_errors;
-
-	struct mlx5_ifc_traffic_counter_bits transmit_errors;
-
-	struct mlx5_ifc_traffic_counter_bits received_ib_unicast;
-
-	struct mlx5_ifc_traffic_counter_bits transmitted_ib_unicast;
-
-	struct mlx5_ifc_traffic_counter_bits received_ib_multicast;
-
-	struct mlx5_ifc_traffic_counter_bits transmitted_ib_multicast;
-
-	struct mlx5_ifc_traffic_counter_bits received_eth_broadcast;
-
-	struct mlx5_ifc_traffic_counter_bits transmitted_eth_broadcast;
-
-	struct mlx5_ifc_traffic_counter_bits received_eth_unicast;
-
-	struct mlx5_ifc_traffic_counter_bits transmitted_eth_unicast;
-
-	struct mlx5_ifc_traffic_counter_bits received_eth_multicast;
-
-	struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
-
-	u8         reserved_at_680[0xa00];
-};
-
-enum {
-	MLX5_QUERY_VPORT_COUNTER_IN_OP_MOD_VPORT_COUNTERS  = 0x0,
-};
-
-struct mlx5_ifc_query_vport_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xb];
-	u8         port_num[0x4];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x60];
-
-	u8         clear[0x1];
-	u8         reserved_at_c1[0x1f];
-
-	u8         reserved_at_e0[0x20];
-};
-
-struct mlx5_ifc_query_tis_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_tisc_bits tis_context;
-};
-
-struct mlx5_ifc_query_tis_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tisn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_tir_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_tirc_bits tir_context;
-};
-
-struct mlx5_ifc_query_tir_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tirn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_srqc_bits srq_context_entry;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_query_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_sq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_sqc_bits sq_context;
-};
-
-struct mlx5_ifc_query_sq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         sqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_special_contexts_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         dump_fill_mkey[0x20];
-
-	u8         resd_lkey[0x20];
-
-	u8         null_mkey[0x20];
-
-	u8         reserved_at_a0[0x60];
-};
-
-struct mlx5_ifc_query_special_contexts_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_query_scheduling_element_out_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_scheduling_context_bits scheduling_context;
-
-	u8         reserved_at_300[0x100];
-};
-
-enum {
-	SCHEDULING_HIERARCHY_E_SWITCH = 0x2,
-};
-
-struct mlx5_ifc_query_scheduling_element_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         scheduling_hierarchy[0x8];
-	u8         reserved_at_48[0x18];
-
-	u8         scheduling_element_id[0x20];
-
-	u8         reserved_at_80[0x180];
-};
-
-struct mlx5_ifc_query_rqt_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rqtc_bits rqt_context;
-};
-
-struct mlx5_ifc_query_rqt_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rqtn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_rq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rqc_bits rq_context;
-};
-
-struct mlx5_ifc_query_rq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_roce_address_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_roce_addr_layout_bits roce_address;
-};
-
-struct mlx5_ifc_query_roce_address_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         roce_address_index[0x10];
-	u8         reserved_at_50[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_rmp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rmpc_bits rmp_context;
-};
-
-struct mlx5_ifc_query_rmp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rmpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_query_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_q_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         rx_write_requests[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	u8         rx_read_requests[0x20];
-
-	u8         reserved_at_e0[0x20];
-
-	u8         rx_atomic_requests[0x20];
-
-	u8         reserved_at_120[0x20];
-
-	u8         rx_dct_connect[0x20];
-
-	u8         reserved_at_160[0x20];
-
-	u8         out_of_buffer[0x20];
-
-	u8         reserved_at_1a0[0x20];
-
-	u8         out_of_sequence[0x20];
-
-	u8         reserved_at_1e0[0x20];
-
-	u8         duplicate_request[0x20];
-
-	u8         reserved_at_220[0x20];
-
-	u8         rnr_nak_retry_err[0x20];
-
-	u8         reserved_at_260[0x20];
-
-	u8         packet_seq_err[0x20];
-
-	u8         reserved_at_2a0[0x20];
-
-	u8         implied_nak_seq_err[0x20];
-
-	u8         reserved_at_2e0[0x20];
-
-	u8         local_ack_timeout_err[0x20];
-
-	u8         reserved_at_320[0x4e0];
-};
-
-struct mlx5_ifc_query_q_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x80];
-
-	u8         clear[0x1];
-	u8         reserved_at_c1[0x1f];
-
-	u8         reserved_at_e0[0x18];
-	u8         counter_set_id[0x8];
-};
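
The Q counter output above is a flat array of 32-bit counters, so reading one is just a matter of naming the field. A minimal sketch that samples out_of_buffer for a given counter set (the clear semantics and the RDMA-side allocation of counter_set_id are left out):

static int example_query_out_of_buffer(struct mlx5_core_dev *dev,
				       u8 counter_set_id, u32 *oob)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	int err;

	MLX5_SET(query_q_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)
		*oob = MLX5_GET(query_q_counter_out, out, out_of_buffer);
	return err;
}
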
-
-struct mlx5_ifc_query_pages_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         function_id[0x10];
-
-	u8         num_pages[0x20];
-};
-
-enum {
-	MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES     = 0x1,
-	MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES     = 0x2,
-	MLX5_QUERY_PAGES_IN_OP_MOD_REGULAR_PAGES  = 0x3,
-};
-
-struct mlx5_ifc_query_pages_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         function_id[0x10];
-
-	u8         reserved_at_60[0x20];
-};
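
QUERY_PAGES is how firmware tells the driver how many pages it needs at each init stage; a negative num_pages means the device wants to return pages instead. A sketch following the pagealloc flow, where op_mod picks boot/init/regular from the enum above (function name is illustrative):

static int example_query_pages(struct mlx5_core_dev *dev, u16 op_mod,
			       s32 *npages)
{
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, op_mod);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (!err)	/* negative means "reclaim pages from me" */
		*npages = (s32)MLX5_GET(query_pages_out, out, num_pages);
	return err;
}
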
-
-struct mlx5_ifc_query_nic_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
-};
-
-struct mlx5_ifc_query_nic_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x5];
-	u8         allowed_list_type[0x3];
-	u8         reserved_at_68[0x18];
-};
-
-struct mlx5_ifc_query_mkey_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
-
-	u8         reserved_at_280[0x600];
-
-	u8         bsf0_klm0_pas_mtt0_1[16][0x8];
-
-	u8         bsf1_klm1_pas_mtt2_3[16][0x8];
-};
-
-struct mlx5_ifc_query_mkey_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         mkey_index[0x18];
-
-	u8         pg_access[0x1];
-	u8         reserved_at_61[0x1f];
-};
-
-struct mlx5_ifc_query_mad_demux_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         mad_dumux_parameters_block[0x20];
-};
-
-struct mlx5_ifc_query_mad_demux_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_query_l2_table_entry_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xa0];
-
-	u8         reserved_at_e0[0x13];
-	u8         vlan_valid[0x1];
-	u8         vlan[0xc];
-
-	struct mlx5_ifc_mac_address_layout_bits mac_address;
-
-	u8         reserved_at_140[0xc0];
-};
-
-struct mlx5_ifc_query_l2_table_entry_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x60];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_index[0x18];
-
-	u8         reserved_at_c0[0x140];
-};
-
-struct mlx5_ifc_query_issi_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         current_issi[0x10];
-
-	u8         reserved_at_60[0xa0];
-
-	u8         reserved_at_100[76][0x8];
-	u8         supported_issi_dw0[0x20];
-};
-
-struct mlx5_ifc_query_issi_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_driver_version_out_bits {
-	u8         status[0x8];
-	u8         reserved_0[0x18];
-
-	u8         syndrome[0x20];
-	u8         reserved_1[0x40];
-};
-
-struct mlx5_ifc_set_driver_version_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_0[0x10];
-
-	u8         reserved_1[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_2[0x40];
-	u8         driver_version[64][0x8];
-};
-
-struct mlx5_ifc_query_hca_vport_pkey_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_pkey_bits pkey[0];
-};
-
-struct mlx5_ifc_query_hca_vport_pkey_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xb];
-	u8         port_num[0x4];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x10];
-	u8         pkey_index[0x10];
-};
-
-enum {
-	MLX5_HCA_VPORT_SEL_PORT_GUID	= 1 << 0,
-	MLX5_HCA_VPORT_SEL_NODE_GUID	= 1 << 1,
-	MLX5_HCA_VPORT_SEL_STATE_POLICY	= 1 << 2,
-};
-
-struct mlx5_ifc_query_hca_vport_gid_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x20];
-
-	u8         gids_num[0x10];
-	u8         reserved_at_70[0x10];
-
-	struct mlx5_ifc_array128_auto_bits gid[0];
-};
-
-struct mlx5_ifc_query_hca_vport_gid_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xb];
-	u8         port_num[0x4];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x10];
-	u8         gid_index[0x10];
-};
-
-struct mlx5_ifc_query_hca_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
-};
-
-struct mlx5_ifc_query_hca_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xb];
-	u8         port_num[0x4];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_hca_cap_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	union mlx5_ifc_hca_cap_union_bits capability;
-};
-
-struct mlx5_ifc_query_hca_cap_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_query_flow_table_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x80];
-
-	u8         reserved_at_c0[0x8];
-	u8         level[0x8];
-	u8         reserved_at_d0[0x8];
-	u8         log_size[0x8];
-
-	u8         reserved_at_e0[0x120];
-};
-
-struct mlx5_ifc_query_flow_table_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x140];
-};
-
-struct mlx5_ifc_query_fte_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x1c0];
-
-	struct mlx5_ifc_flow_context_bits flow_context;
-};
-
-struct mlx5_ifc_query_fte_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x40];
-
-	u8         flow_index[0x20];
-
-	u8         reserved_at_120[0xe0];
-};
-
-enum {
-	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
-	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
-	MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
-};
-
-struct mlx5_ifc_query_flow_group_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xa0];
-
-	u8         start_flow_index[0x20];
-
-	u8         reserved_at_100[0x20];
-
-	u8         end_flow_index[0x20];
-
-	u8         reserved_at_140[0xa0];
-
-	u8         reserved_at_1e0[0x18];
-	u8         match_criteria_enable[0x8];
-
-	struct mlx5_ifc_fte_match_param_bits match_criteria;
-
-	u8         reserved_at_1200[0xe00];
-};
-
-struct mlx5_ifc_query_flow_group_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         group_id[0x20];
-
-	u8         reserved_at_e0[0x120];
-};
-
-struct mlx5_ifc_query_flow_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_traffic_counter_bits flow_statistics[0];
-};
-
-struct mlx5_ifc_query_flow_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x80];
-
-	u8         clear[0x1];
-	u8         reserved_at_c1[0xf];
-	u8         num_of_counters[0x10];
-
-	u8         reserved_at_e0[0x10];
-	u8         flow_counter_id[0x10];
-};
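
Flow counters come back through the flow_statistics tail as traffic_counter entries (a packets/octets pair of 64-bit fields, defined earlier in this file). A hedged single-counter read, roughly what the flow-steering counter code does; the bulk-query path with an allocated output buffer is omitted for brevity:

static int example_query_fc(struct mlx5_core_dev *dev, u16 id,
			    u64 *packets, u64 *bytes)
{
	u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
	/* room for the fixed header plus one traffic_counter entry */
	u32 out[MLX5_ST_SZ_DW(query_flow_counter_out) +
		MLX5_ST_SZ_DW(traffic_counter)] = {0};
	void *stats;
	int err;

	MLX5_SET(query_flow_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_FLOW_COUNTER);
	MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
	*packets = MLX5_GET64(traffic_counter, stats, packets);
	*bytes = MLX5_GET64(traffic_counter, stats, octets);
	return 0;
}
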
-
-struct mlx5_ifc_query_esw_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
-};
-
-struct mlx5_ifc_query_esw_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_modify_esw_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_esw_vport_context_fields_select_bits {
-	u8         reserved_at_0[0x1c];
-	u8         vport_cvlan_insert[0x1];
-	u8         vport_svlan_insert[0x1];
-	u8         vport_cvlan_strip[0x1];
-	u8         vport_svlan_strip[0x1];
-};
-
-struct mlx5_ifc_modify_esw_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
-
-	struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
-};
-
-struct mlx5_ifc_query_eq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_eqc_bits eq_context_entry;
-
-	u8         reserved_at_280[0x40];
-
-	u8         event_bitmask[0x40];
-
-	u8         reserved_at_300[0x580];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_query_eq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         eq_number[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_encap_header_in_bits {
-	u8         reserved_at_0[0x5];
-	u8         header_type[0x3];
-	u8         reserved_at_8[0xe];
-	u8         encap_header_size[0xa];
-
-	u8         reserved_at_20[0x10];
-	u8         encap_header[2][0x8];
-
-	u8         more_encap_header[0][0x8];
-};
-
-struct mlx5_ifc_query_encap_header_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0xa0];
-
-	struct mlx5_ifc_encap_header_in_bits encap_header[0];
-};
-
-struct mlx5_ifc_query_encap_header_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         encap_id[0x20];
-
-	u8         reserved_at_60[0xa0];
-};
-
-struct mlx5_ifc_alloc_encap_header_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         encap_id[0x20];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_encap_header_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xa0];
-
-	struct mlx5_ifc_encap_header_in_bits encap_header;
-};
-
-struct mlx5_ifc_dealloc_encap_header_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_encap_header_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         encap_id[0x20];
-
-	u8         reserved_60[0x20];
-};
-
-struct mlx5_ifc_query_dct_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_dctc_bits dct_context_entry;
-
-	u8         reserved_at_280[0x180];
-};
-
-struct mlx5_ifc_query_dct_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         dctn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_cq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_cqc_bits cq_context;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_query_cq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_cong_status_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x20];
-
-	u8         enable[0x1];
-	u8         tag_enable[0x1];
-	u8         reserved_at_62[0x1e];
-};
-
-struct mlx5_ifc_query_cong_status_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         priority[0x4];
-	u8         cong_protocol[0x4];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_cong_statistics_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         cur_flows[0x20];
-
-	u8         sum_flows[0x20];
-
-	u8         cnp_ignored_high[0x20];
-
-	u8         cnp_ignored_low[0x20];
-
-	u8         cnp_handled_high[0x20];
-
-	u8         cnp_handled_low[0x20];
-
-	u8         reserved_at_140[0x100];
-
-	u8         time_stamp_high[0x20];
-
-	u8         time_stamp_low[0x20];
-
-	u8         accumulators_period[0x20];
-
-	u8         ecn_marked_roce_packets_high[0x20];
-
-	u8         ecn_marked_roce_packets_low[0x20];
-
-	u8         cnps_sent_high[0x20];
-
-	u8         cnps_sent_low[0x20];
-
-	u8         reserved_at_320[0x560];
-};
-
-struct mlx5_ifc_query_cong_statistics_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         clear[0x1];
-	u8         reserved_at_41[0x1f];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_cong_params_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
-};
-
-struct mlx5_ifc_query_cong_params_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x1c];
-	u8         cong_protocol[0x4];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_adapter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
-};
-
-struct mlx5_ifc_query_adapter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_qp_2rst_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_qp_2rst_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_qp_2err_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_qp_2err_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_page_fault_resume_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_page_fault_resume_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         error[0x1];
-	u8         reserved_at_41[0x4];
-	u8         page_fault_type[0x3];
-	u8         wq_number[0x18];
-
-	u8         reserved_at_60[0x8];
-	u8         token[0x18];
-};
-
-struct mlx5_ifc_nop_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_nop_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_vport_state_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_vport_state_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x18];
-	u8         admin_state[0x4];
-	u8         reserved_at_7c[0x4];
-};
-
-struct mlx5_ifc_modify_tis_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_tis_bitmask_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         reserved_at_20[0x1d];
-	u8         lag_tx_port_affinity[0x1];
-	u8         strict_lag_tx_port_affinity[0x1];
-	u8         prio[0x1];
-};
-
-struct mlx5_ifc_modify_tis_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tisn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_tisc_bits ctx;
-};
-
-struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         reserved_at_20[0x1b];
-	u8         self_lb_en[0x1];
-	u8         reserved_at_3c[0x1];
-	u8         hash[0x1];
-	u8         reserved_at_3e[0x1];
-	u8         lro[0x1];
-};
-
-struct mlx5_ifc_modify_tir_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_tir_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tirn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_tirc_bits ctx;
-};
-
-struct mlx5_ifc_modify_sq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_sq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         sq_state[0x4];
-	u8         reserved_at_44[0x4];
-	u8         sqn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         modify_bitmask[0x40];
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_sqc_bits ctx;
-};
-
-struct mlx5_ifc_modify_scheduling_element_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x1c0];
-};
-
-enum {
-	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE = 0x1,
-	MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW = 0x2,
-};
-
-struct mlx5_ifc_modify_scheduling_element_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         scheduling_hierarchy[0x8];
-	u8         reserved_at_48[0x18];
-
-	u8         scheduling_element_id[0x20];
-
-	u8         reserved_at_80[0x20];
-
-	u8         modify_bitmask[0x20];
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_scheduling_context_bits scheduling_context;
-
-	u8         reserved_at_300[0x100];
-};
-
-struct mlx5_ifc_modify_rqt_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_rqt_bitmask_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         reserved_at_20[0x1f];
-	u8         rqn_list[0x1];
-};
-
-struct mlx5_ifc_modify_rqt_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rqtn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_rqt_bitmask_bits bitmask;
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_rqtc_bits ctx;
-};
-
-struct mlx5_ifc_modify_rq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
-	MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_MODIFY_RQ_COUNTER_SET_ID = 1ULL << 3,
-};
-
-struct mlx5_ifc_modify_rq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         rq_state[0x4];
-	u8         reserved_at_44[0x4];
-	u8         rqn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         modify_bitmask[0x40];
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_rqc_bits ctx;
-};
-
-struct mlx5_ifc_modify_rmp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_rmp_bitmask_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         reserved_at_20[0x1f];
-	u8         lwm[0x1];
-};
-
-struct mlx5_ifc_modify_rmp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         rmp_state[0x4];
-	u8         reserved_at_44[0x4];
-	u8         rmpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_rmp_bitmask_bits bitmask;
-
-	u8         reserved_at_c0[0x40];
-
-	struct mlx5_ifc_rmpc_bits ctx;
-};
-
-struct mlx5_ifc_modify_nic_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_nic_vport_field_select_bits {
-	u8         reserved_at_0[0x16];
-	u8         node_guid[0x1];
-	u8         port_guid[0x1];
-	u8         min_inline[0x1];
-	u8         mtu[0x1];
-	u8         change_event[0x1];
-	u8         promisc[0x1];
-	u8         permanent_address[0x1];
-	u8         addresses_list[0x1];
-	u8         roce_en[0x1];
-	u8         reserved_at_1f[0x1];
-};
-
-struct mlx5_ifc_modify_nic_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
-
-	u8         reserved_at_80[0x780];
-
-	struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
-};
-
-struct mlx5_ifc_modify_hca_vport_context_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_hca_vport_context_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xb];
-	u8         port_num[0x4];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
-};
-
-struct mlx5_ifc_modify_cq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_MODIFY_CQ_IN_OP_MOD_MODIFY_CQ  = 0x0,
-	MLX5_MODIFY_CQ_IN_OP_MOD_RESIZE_CQ  = 0x1,
-};
-
-struct mlx5_ifc_modify_cq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
-
-	struct mlx5_ifc_cqc_bits cq_context;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_modify_cong_status_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_cong_status_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         priority[0x4];
-	u8         cong_protocol[0x4];
-
-	u8         enable[0x1];
-	u8         tag_enable[0x1];
-	u8         reserved_at_62[0x1e];
-};
-
-struct mlx5_ifc_modify_cong_params_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_cong_params_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x1c];
-	u8         cong_protocol[0x4];
-
-	union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
-
-	u8         reserved_at_80[0x80];
-
-	union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
-};
-
-struct mlx5_ifc_manage_pages_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         output_num_entries[0x20];
-
-	u8         reserved_at_60[0x20];
-
-	u8         pas[0][0x40];
-};
-
-enum {
-	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_FAIL     = 0x0,
-	MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS  = 0x1,
-	MLX5_MANAGE_PAGES_IN_OP_MOD_HCA_RETURN_PAGES    = 0x2,
-};
-
-struct mlx5_ifc_manage_pages_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         function_id[0x10];
-
-	u8         input_num_entries[0x20];
-
-	u8         pas[0][0x40];
-};
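
MANAGE_PAGES is the other half of the page protocol: the driver answers a QUERY_PAGES request by pushing (or reclaiming) page addresses through the variable-length pas[] tail, so the input cannot live on the stack. A rough sketch of the give-pages direction, close to the driver's pagealloc code; allocation of the DMA pages themselves is elided and the function name is illustrative:

static int example_give_pages(struct mlx5_core_dev *dev, u16 func_id,
			      u64 *addrs, int npages)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in) +
		    npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	u32 *in;
	int i, err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	/* op_mod 0x1: the driver is supplying the requested pages */
	MLX5_SET(manage_pages_in, in, op_mod,
		 MLX5_MANAGE_PAGES_IN_OP_MOD_ALLOCATION_SUCCESS);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);
	for (i = 0; i < npages; i++)
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addrs[i]);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}
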
-
-struct mlx5_ifc_mad_ifc_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         response_mad_packet[256][0x8];
-};
-
-struct mlx5_ifc_mad_ifc_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         remote_lid[0x10];
-	u8         reserved_at_50[0x8];
-	u8         port[0x8];
-
-	u8         reserved_at_60[0x20];
-
-	u8         mad[256][0x8];
-};
-
-struct mlx5_ifc_init_hca_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_init_hca_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_init2rtr_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_init2rtr_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_init2init_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_init2init_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-};
-
-struct mlx5_ifc_get_dropped_packet_log_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         packet_headers_log[128][0x8];
-
-	u8         packet_syndrome[64][0x8];
-};
-
-struct mlx5_ifc_get_dropped_packet_log_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_gen_eqe_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         eq_number[0x8];
-
-	u8         reserved_at_60[0x20];
-
-	u8         eqe[64][0x8];
-};
-
-struct mlx5_ifc_gen_eq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_enable_hca_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x20];
-};
-
-struct mlx5_ifc_enable_hca_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         function_id[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_drain_dct_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_drain_dct_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         dctn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_disable_hca_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x20];
-};
-
-struct mlx5_ifc_disable_hca_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         function_id[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_detach_from_mcg_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_detach_from_mcg_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         multicast_gid[16][0x8];
-};
-
-struct mlx5_ifc_destroy_xrq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_xrq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_xrc_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_xrc_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrc_srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_tis_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_tis_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tisn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_tir_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_tir_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         tirn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_sq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_sq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         sqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_scheduling_element_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x1c0];
-};
-
-struct mlx5_ifc_destroy_scheduling_element_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         scheduling_hierarchy[0x8];
-	u8         reserved_at_48[0x18];
-
-	u8         scheduling_element_id[0x20];
-
-	u8         reserved_at_80[0x180];
-};
-
-struct mlx5_ifc_destroy_rqt_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_rqt_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rqtn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_rq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_rq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_rmp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_rmp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         rmpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_psv_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_psv_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         psvn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_mkey_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_mkey_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         mkey_index[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_flow_table_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_flow_table_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x140];
-};
-
-struct mlx5_ifc_destroy_flow_group_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_flow_group_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         group_id[0x20];
-
-	u8         reserved_at_e0[0x120];
-};
-
-struct mlx5_ifc_destroy_eq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_eq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         eq_number[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_dct_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_dct_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         dctn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_destroy_cq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_cq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x20];
-
-	u8         reserved_at_60[0x10];
-	u8         vxlan_udp_port[0x10];
-};
-
-struct mlx5_ifc_delete_l2_table_entry_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_delete_l2_table_entry_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x60];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_index[0x18];
-
-	u8         reserved_at_c0[0x140];
-};
-
-struct mlx5_ifc_delete_fte_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_delete_fte_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x40];
-
-	u8         flow_index[0x20];
-
-	u8         reserved_at_120[0xe0];
-};
-
-struct mlx5_ifc_dealloc_xrcd_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_xrcd_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrcd[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_uar_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_uar_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         uar[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_transport_domain_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_transport_domain_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         transport_domain[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_q_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_q_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x18];
-	u8         counter_set_id[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_pd_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_pd_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         pd[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_dealloc_flow_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_dealloc_flow_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         flow_counter_id[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_xrq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         xrqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_xrq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_xrqc_bits xrq_context;
-};
-
-struct mlx5_ifc_create_xrc_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         xrc_srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_xrc_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_create_tis_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         tisn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_tis_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_tisc_bits ctx;
-};
-
-struct mlx5_ifc_create_tir_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         tirn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_tir_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_tirc_bits ctx;
-};
-
-struct mlx5_ifc_create_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         srqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_srqc_bits srq_context_entry;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_create_sq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         sqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_sq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_sqc_bits ctx;
-};
-
-struct mlx5_ifc_create_scheduling_element_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         scheduling_element_id[0x20];
-
-	u8         reserved_at_a0[0x160];
-};
-
-struct mlx5_ifc_create_scheduling_element_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         scheduling_hierarchy[0x8];
-	u8         reserved_at_48[0x18];
-
-	u8         reserved_at_60[0xa0];
-
-	struct mlx5_ifc_scheduling_context_bits scheduling_context;
-
-	u8         reserved_at_300[0x100];
-};
-
-struct mlx5_ifc_create_rqt_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         rqtn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_rqt_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rqtc_bits rqt_context;
-};
-
-struct mlx5_ifc_create_rq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         rqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_rq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rqc_bits ctx;
-};
-
-struct mlx5_ifc_create_rmp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         rmpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_rmp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0xc0];
-
-	struct mlx5_ifc_rmpc_bits ctx;
-};
-
-struct mlx5_ifc_create_qp_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_qp_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	u8         opt_param_mask[0x20];
-
-	u8         reserved_at_a0[0x20];
-
-	struct mlx5_ifc_qpc_bits qpc;
-
-	u8         reserved_at_800[0x80];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_create_psv_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         reserved_at_80[0x8];
-	u8         psv0_index[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         psv1_index[0x18];
-
-	u8         reserved_at_c0[0x8];
-	u8         psv2_index[0x18];
-
-	u8         reserved_at_e0[0x8];
-	u8         psv3_index[0x18];
-};
-
-struct mlx5_ifc_create_psv_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         num_psv[0x4];
-	u8         reserved_at_44[0x4];
-	u8         pd[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_mkey_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         mkey_index[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_mkey_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x20];
-
-	u8         pg_access[0x1];
-	u8         reserved_at_61[0x1f];
-
-	struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
-
-	u8         reserved_at_280[0x80];
-
-	u8         translations_octword_actual_size[0x20];
-
-	u8         reserved_at_320[0x560];
-
-	u8         klm_pas_mtt[0][0x20];
-};
-
-struct mlx5_ifc_create_flow_table_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_flow_table_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x20];
-
-	u8         encap_en[0x1];
-	u8         decap_en[0x1];
-	u8         reserved_at_c2[0x2];
-	u8         table_miss_mode[0x4];
-	u8         level[0x8];
-	u8         reserved_at_d0[0x8];
-	u8         log_size[0x8];
-
-	u8         reserved_at_e0[0x8];
-	u8         table_miss_id[0x18];
-
-	u8         reserved_at_100[0x8];
-	u8         lag_master_next_table_id[0x18];
-
-	u8         reserved_at_120[0x80];
-};
-
-struct mlx5_ifc_create_flow_group_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         group_id[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-enum {
-	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
-	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
-	MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
-};
-
-struct mlx5_ifc_create_flow_group_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x20];
-
-	u8         start_flow_index[0x20];
-
-	u8         reserved_at_100[0x20];
-
-	u8         end_flow_index[0x20];
-
-	u8         reserved_at_140[0xa0];
-
-	u8         reserved_at_1e0[0x18];
-	u8         match_criteria_enable[0x8];
-
-	struct mlx5_ifc_fte_match_param_bits match_criteria;
-
-	u8         reserved_at_1200[0xe00];
-};
-
-struct mlx5_ifc_create_eq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x18];
-	u8         eq_number[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_eq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_eqc_bits eq_context_entry;
-
-	u8         reserved_at_280[0x40];
-
-	u8         event_bitmask[0x40];
-
-	u8         reserved_at_300[0x580];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_create_dct_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         dctn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_dct_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_dctc_bits dct_context_entry;
-
-	u8         reserved_at_280[0x180];
-};
-
-struct mlx5_ifc_create_cq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         cqn[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_create_cq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_cqc_bits cq_context;
-
-	u8         reserved_at_280[0x600];
-
-	u8         pas[0][0x40];
-};
-
-struct mlx5_ifc_config_int_moderation_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x4];
-	u8         min_delay[0xc];
-	u8         int_vector[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-enum {
-	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_WRITE  = 0x0,
-	MLX5_CONFIG_INT_MODERATION_IN_OP_MOD_READ   = 0x1,
-};
-
-struct mlx5_ifc_config_int_moderation_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x4];
-	u8         min_delay[0xc];
-	u8         int_vector[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_attach_to_mcg_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_attach_to_mcg_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         qpn[0x18];
-
-	u8         reserved_at_60[0x20];
-
-	u8         multicast_gid[16][0x8];
-};
-
-struct mlx5_ifc_arm_xrq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_arm_xrq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrqn[0x18];
-
-	u8         reserved_at_60[0x10];
-	u8         lwm[0x10];
-};
-
-struct mlx5_ifc_arm_xrc_srq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ  = 0x1,
-};
-
-struct mlx5_ifc_arm_xrc_srq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         xrc_srqn[0x18];
-
-	u8         reserved_at_60[0x10];
-	u8         lwm[0x10];
-};
-
-struct mlx5_ifc_arm_rq_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-enum {
-	MLX5_ARM_RQ_IN_OP_MOD_SRQ = 0x1,
-	MLX5_ARM_RQ_IN_OP_MOD_XRQ = 0x2,
-};
-
-struct mlx5_ifc_arm_rq_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         srq_number[0x18];
-
-	u8         reserved_at_60[0x10];
-	u8         lwm[0x10];
-};
-
-struct mlx5_ifc_arm_dct_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_arm_dct_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x8];
-	u8         dct_number[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_xrcd_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         xrcd[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_xrcd_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_alloc_uar_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         uar[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_uar_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_alloc_transport_domain_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         transport_domain[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_transport_domain_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_alloc_q_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x18];
-	u8         counter_set_id[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_q_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_alloc_pd_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x8];
-	u8         pd[0x18];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_pd_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_alloc_flow_counter_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         flow_counter_id[0x10];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_alloc_flow_counter_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x20];
-
-	u8         reserved_at_60[0x10];
-	u8         vxlan_udp_port[0x10];
-};
-
-struct mlx5_ifc_set_rate_limit_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_rate_limit_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         rate_limit_index[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         rate_limit[0x20];
-};
-
-struct mlx5_ifc_access_register_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	u8         register_data[0][0x20];
-};
-
-enum {
-	MLX5_ACCESS_REGISTER_IN_OP_MOD_WRITE  = 0x0,
-	MLX5_ACCESS_REGISTER_IN_OP_MOD_READ   = 0x1,
-};
-
-struct mlx5_ifc_access_register_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         register_id[0x10];
-
-	u8         argument[0x20];
-
-	u8         register_data[0][0x20];
-};
-
-struct mlx5_ifc_sltp_reg_bits {
-	u8         status[0x4];
-	u8         version[0x4];
-	u8         local_port[0x8];
-	u8         pnat[0x2];
-	u8         reserved_at_12[0x2];
-	u8         lane[0x4];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x7];
-	u8         polarity[0x1];
-	u8         ob_tap0[0x8];
-	u8         ob_tap1[0x8];
-	u8         ob_tap2[0x8];
-
-	u8         reserved_at_60[0xc];
-	u8         ob_preemp_mode[0x4];
-	u8         ob_reg[0x8];
-	u8         ob_bias[0x8];
-
-	u8         reserved_at_80[0x20];
-};
-
-struct mlx5_ifc_slrg_reg_bits {
-	u8         status[0x4];
-	u8         version[0x4];
-	u8         local_port[0x8];
-	u8         pnat[0x2];
-	u8         reserved_at_12[0x2];
-	u8         lane[0x4];
-	u8         reserved_at_18[0x8];
-
-	u8         time_to_link_up[0x10];
-	u8         reserved_at_30[0xc];
-	u8         grade_lane_speed[0x4];
-
-	u8         grade_version[0x8];
-	u8         grade[0x18];
-
-	u8         reserved_at_60[0x4];
-	u8         height_grade_type[0x4];
-	u8         height_grade[0x18];
-
-	u8         height_dz[0x10];
-	u8         height_dv[0x10];
-
-	u8         reserved_at_a0[0x10];
-	u8         height_sigma[0x10];
-
-	u8         reserved_at_c0[0x20];
-
-	u8         reserved_at_e0[0x4];
-	u8         phase_grade_type[0x4];
-	u8         phase_grade[0x18];
-
-	u8         reserved_at_100[0x8];
-	u8         phase_eo_pos[0x8];
-	u8         reserved_at_110[0x8];
-	u8         phase_eo_neg[0x8];
-
-	u8         ffe_set_tested[0x10];
-	u8         test_errors_per_lane[0x10];
-};
-
-struct mlx5_ifc_pvlc_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x1c];
-	u8         vl_hw_cap[0x4];
-
-	u8         reserved_at_40[0x1c];
-	u8         vl_admin[0x4];
-
-	u8         reserved_at_60[0x1c];
-	u8         vl_operational[0x4];
-};
-
-struct mlx5_ifc_pude_reg_bits {
-	u8         swid[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x4];
-	u8         admin_status[0x4];
-	u8         reserved_at_18[0x4];
-	u8         oper_status[0x4];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_ptys_reg_bits {
-	u8         reserved_at_0[0x1];
-	u8         an_disable_admin[0x1];
-	u8         an_disable_cap[0x1];
-	u8         reserved_at_3[0x5];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0xd];
-	u8         proto_mask[0x3];
-
-	u8         an_status[0x4];
-	u8         reserved_at_24[0x3c];
-
-	u8         eth_proto_capability[0x20];
-
-	u8         ib_link_width_capability[0x10];
-	u8         ib_proto_capability[0x10];
-
-	u8         reserved_at_a0[0x20];
-
-	u8         eth_proto_admin[0x20];
-
-	u8         ib_link_width_admin[0x10];
-	u8         ib_proto_admin[0x10];
-
-	u8         reserved_at_100[0x20];
-
-	u8         eth_proto_oper[0x20];
-
-	u8         ib_link_width_oper[0x10];
-	u8         ib_proto_oper[0x10];
-
-	u8         reserved_at_160[0x20];
-
-	u8         eth_proto_lp_advertise[0x20];
-
-	u8         reserved_at_1a0[0x60];
-};
-
-struct mlx5_ifc_mlcr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x20];
-
-	u8         beacon_duration[0x10];
-	u8         reserved_at_40[0x10];
-
-	u8         beacon_remain[0x10];
-};
-
-struct mlx5_ifc_ptas_reg_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         algorithm_options[0x10];
-	u8         reserved_at_30[0x4];
-	u8         repetitions_mode[0x4];
-	u8         num_of_repetitions[0x8];
-
-	u8         grade_version[0x8];
-	u8         height_grade_type[0x4];
-	u8         phase_grade_type[0x4];
-	u8         height_grade_weight[0x8];
-	u8         phase_grade_weight[0x8];
-
-	u8         gisim_measure_bits[0x10];
-	u8         adaptive_tap_measure_bits[0x10];
-
-	u8         ber_bath_high_error_threshold[0x10];
-	u8         ber_bath_mid_error_threshold[0x10];
-
-	u8         ber_bath_low_error_threshold[0x10];
-	u8         one_ratio_high_threshold[0x10];
-
-	u8         one_ratio_high_mid_threshold[0x10];
-	u8         one_ratio_low_mid_threshold[0x10];
-
-	u8         one_ratio_low_threshold[0x10];
-	u8         ndeo_error_threshold[0x10];
-
-	u8         mixer_offset_step_size[0x10];
-	u8         reserved_at_110[0x8];
-	u8         mix90_phase_for_voltage_bath[0x8];
-
-	u8         mixer_offset_start[0x10];
-	u8         mixer_offset_end[0x10];
-
-	u8         reserved_at_140[0x15];
-	u8         ber_test_time[0xb];
-};
-
-struct mlx5_ifc_pspa_reg_bits {
-	u8         swid[0x8];
-	u8         local_port[0x8];
-	u8         sub_port[0x8];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0x20];
-};
-
-struct mlx5_ifc_pqdr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x5];
-	u8         prio[0x3];
-	u8         reserved_at_18[0x6];
-	u8         mode[0x2];
-
-	u8         reserved_at_20[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         min_threshold[0x10];
-
-	u8         reserved_at_60[0x10];
-	u8         max_threshold[0x10];
-
-	u8         reserved_at_80[0x10];
-	u8         mark_probability_denominator[0x10];
-
-	u8         reserved_at_a0[0x60];
-};
-
-struct mlx5_ifc_ppsc_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x60];
-
-	u8         reserved_at_80[0x1c];
-	u8         wrps_admin[0x4];
-
-	u8         reserved_at_a0[0x1c];
-	u8         wrps_status[0x4];
-
-	u8         reserved_at_c0[0x8];
-	u8         up_threshold[0x8];
-	u8         reserved_at_d0[0x8];
-	u8         down_threshold[0x8];
-
-	u8         reserved_at_e0[0x20];
-
-	u8         reserved_at_100[0x1c];
-	u8         srps_admin[0x4];
-
-	u8         reserved_at_120[0x1c];
-	u8         srps_status[0x4];
-
-	u8         reserved_at_140[0x40];
-};
-
-struct mlx5_ifc_pplr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x8];
-	u8         lb_cap[0x8];
-	u8         reserved_at_30[0x8];
-	u8         lb_en[0x8];
-};
-
-struct mlx5_ifc_pplm_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x20];
-
-	u8         port_profile_mode[0x8];
-	u8         static_port_profile[0x8];
-	u8         active_port_profile[0x8];
-	u8         reserved_at_58[0x8];
-
-	u8         retransmission_active[0x8];
-	u8         fec_mode_active[0x18];
-
-	u8         reserved_at_80[0x20];
-};
-
-struct mlx5_ifc_ppcnt_reg_bits {
-	u8         swid[0x8];
-	u8         local_port[0x8];
-	u8         pnat[0x2];
-	u8         reserved_at_12[0x8];
-	u8         grp[0x6];
-
-	u8         clr[0x1];
-	u8         reserved_at_21[0x1c];
-	u8         prio_tc[0x3];
-
-	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
-struct mlx5_ifc_mpcnt_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         pcie_index[0x8];
-	u8         reserved_at_10[0xa];
-	u8         grp[0x6];
-
-	u8         clr[0x1];
-	u8         reserved_at_21[0x1f];
-
-	union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
-struct mlx5_ifc_ppad_reg_bits {
-	u8         reserved_at_0[0x3];
-	u8         single_mac[0x1];
-	u8         reserved_at_4[0x4];
-	u8         local_port[0x8];
-	u8         mac_47_32[0x10];
-
-	u8         mac_31_0[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_pmtu_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         max_mtu[0x10];
-	u8         reserved_at_30[0x10];
-
-	u8         admin_mtu[0x10];
-	u8         reserved_at_50[0x10];
-
-	u8         oper_mtu[0x10];
-	u8         reserved_at_70[0x10];
-};
-
-struct mlx5_ifc_pmpr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         module[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x18];
-	u8         attenuation_5g[0x8];
-
-	u8         reserved_at_40[0x18];
-	u8         attenuation_7g[0x8];
-
-	u8         reserved_at_60[0x18];
-	u8         attenuation_12g[0x8];
-};
-
-struct mlx5_ifc_pmpe_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         module[0x8];
-	u8         reserved_at_10[0xc];
-	u8         module_status[0x4];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_pmpc_reg_bits {
-	u8         module_state_updated[32][0x8];
-};
-
-struct mlx5_ifc_pmlpn_reg_bits {
-	u8         reserved_at_0[0x4];
-	u8         mlpn_status[0x4];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         e[0x1];
-	u8         reserved_at_21[0x1f];
-};
-
-struct mlx5_ifc_pmlp_reg_bits {
-	u8         rxtx[0x1];
-	u8         reserved_at_1[0x7];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x8];
-	u8         width[0x8];
-
-	u8         lane0_module_mapping[0x20];
-
-	u8         lane1_module_mapping[0x20];
-
-	u8         lane2_module_mapping[0x20];
-
-	u8         lane3_module_mapping[0x20];
-
-	u8         reserved_at_a0[0x160];
-};
-
-struct mlx5_ifc_pmaos_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         module[0x8];
-	u8         reserved_at_10[0x4];
-	u8         admin_status[0x4];
-	u8         reserved_at_18[0x4];
-	u8         oper_status[0x4];
-
-	u8         ase[0x1];
-	u8         ee[0x1];
-	u8         reserved_at_22[0x1c];
-	u8         e[0x2];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_plpc_reg_bits {
-	u8         reserved_at_0[0x4];
-	u8         profile_id[0xc];
-	u8         reserved_at_10[0x4];
-	u8         proto_mask[0x4];
-	u8         reserved_at_18[0x8];
-
-	u8         reserved_at_20[0x10];
-	u8         lane_speed[0x10];
-
-	u8         reserved_at_40[0x17];
-	u8         lpbf[0x1];
-	u8         fec_mode_policy[0x8];
-
-	u8         retransmission_capability[0x8];
-	u8         fec_mode_capability[0x18];
-
-	u8         retransmission_support_admin[0x8];
-	u8         fec_mode_support_admin[0x18];
-
-	u8         retransmission_request_admin[0x8];
-	u8         fec_mode_request_admin[0x18];
-
-	u8         reserved_at_c0[0x80];
-};
-
-struct mlx5_ifc_plib_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x8];
-	u8         ib_port[0x8];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_plbf_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0xd];
-	u8         lbf_mode[0x3];
-
-	u8         reserved_at_20[0x20];
-};
-
-struct mlx5_ifc_pipg_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         dic[0x1];
-	u8         reserved_at_21[0x19];
-	u8         ipg[0x4];
-	u8         reserved_at_3e[0x2];
-};
-
-struct mlx5_ifc_pifr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0xe0];
-
-	u8         port_filter[8][0x20];
-
-	u8         port_filter_update_en[8][0x20];
-};
-
-struct mlx5_ifc_pfcc_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         ppan[0x4];
-	u8         reserved_at_24[0x4];
-	u8         prio_mask_tx[0x8];
-	u8         reserved_at_30[0x8];
-	u8         prio_mask_rx[0x8];
-
-	u8         pptx[0x1];
-	u8         aptx[0x1];
-	u8         reserved_at_42[0x6];
-	u8         pfctx[0x8];
-	u8         reserved_at_50[0x10];
-
-	u8         pprx[0x1];
-	u8         aprx[0x1];
-	u8         reserved_at_62[0x6];
-	u8         pfcrx[0x8];
-	u8         reserved_at_70[0x10];
-
-	u8         reserved_at_80[0x80];
-};
-
-struct mlx5_ifc_pelc_reg_bits {
-	u8         op[0x4];
-	u8         reserved_at_4[0x4];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         op_admin[0x8];
-	u8         op_capability[0x8];
-	u8         op_request[0x8];
-	u8         op_active[0x8];
-
-	u8         admin[0x40];
-
-	u8         capability[0x40];
-
-	u8         request[0x40];
-
-	u8         active[0x40];
-
-	u8         reserved_at_140[0x80];
-};
-
-struct mlx5_ifc_peir_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0xc];
-	u8         error_count[0x4];
-	u8         reserved_at_30[0x10];
-
-	u8         reserved_at_40[0xc];
-	u8         lane[0x4];
-	u8         reserved_at_50[0x8];
-	u8         error_type[0x8];
-};
-
-struct mlx5_ifc_pcap_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x10];
-
-	u8         port_capability_mask[4][0x20];
-};
-
-struct mlx5_ifc_paos_reg_bits {
-	u8         swid[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x4];
-	u8         admin_status[0x4];
-	u8         reserved_at_18[0x4];
-	u8         oper_status[0x4];
-
-	u8         ase[0x1];
-	u8         ee[0x1];
-	u8         reserved_at_22[0x1c];
-	u8         e[0x2];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_pamp_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         opamp_group[0x8];
-	u8         reserved_at_10[0xc];
-	u8         opamp_group_type[0x4];
-
-	u8         start_index[0x10];
-	u8         reserved_at_30[0x4];
-	u8         num_of_indices[0xc];
-
-	u8         index_data[18][0x10];
-};
-
-struct mlx5_ifc_pcmr_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         local_port[0x8];
-	u8         reserved_at_10[0x2e];
-	u8         fcs_cap[0x1];
-	u8         reserved_at_3f[0x1f];
-	u8         fcs_chk[0x1];
-	u8         reserved_at_5f[0x1];
-};
-
-struct mlx5_ifc_lane_2_module_mapping_bits {
-	u8         reserved_at_0[0x6];
-	u8         rx_lane[0x2];
-	u8         reserved_at_8[0x6];
-	u8         tx_lane[0x2];
-	u8         reserved_at_10[0x8];
-	u8         module[0x8];
-};
-
-struct mlx5_ifc_bufferx_reg_bits {
-	u8         reserved_at_0[0x6];
-	u8         lossy[0x1];
-	u8         epsb[0x1];
-	u8         reserved_at_8[0xc];
-	u8         size[0xc];
-
-	u8         xoff_threshold[0x10];
-	u8         xon_threshold[0x10];
-};
-
-struct mlx5_ifc_set_node_in_bits {
-	u8         node_description[64][0x8];
-};
-
-struct mlx5_ifc_register_power_settings_bits {
-	u8         reserved_at_0[0x18];
-	u8         power_settings_level[0x8];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_register_host_endianness_bits {
-	u8         he[0x1];
-	u8         reserved_at_1[0x1f];
-
-	u8         reserved_at_20[0x60];
-};
-
-struct mlx5_ifc_umr_pointer_desc_argument_bits {
-	u8         reserved_at_0[0x20];
-
-	u8         mkey[0x20];
-
-	u8         addressh_63_32[0x20];
-
-	u8         addressl_31_0[0x20];
-};
-
-struct mlx5_ifc_ud_adrs_vector_bits {
-	u8         dc_key[0x40];
-
-	u8         ext[0x1];
-	u8         reserved_at_41[0x7];
-	u8         destination_qp_dct[0x18];
-
-	u8         static_rate[0x4];
-	u8         sl_eth_prio[0x4];
-	u8         fl[0x1];
-	u8         mlid[0x7];
-	u8         rlid_udp_sport[0x10];
-
-	u8         reserved_at_80[0x20];
-
-	u8         rmac_47_16[0x20];
-
-	u8         rmac_15_0[0x10];
-	u8         tclass[0x8];
-	u8         hop_limit[0x8];
-
-	u8         reserved_at_e0[0x1];
-	u8         grh[0x1];
-	u8         reserved_at_e2[0x2];
-	u8         src_addr_index[0x8];
-	u8         flow_label[0x14];
-
-	u8         rgid_rip[16][0x8];
-};
-
-struct mlx5_ifc_pages_req_event_bits {
-	u8         reserved_at_0[0x10];
-	u8         function_id[0x10];
-
-	u8         num_pages[0x20];
-
-	u8         reserved_at_40[0xa0];
-};
-
-struct mlx5_ifc_eqe_bits {
-	u8         reserved_at_0[0x8];
-	u8         event_type[0x8];
-	u8         reserved_at_10[0x8];
-	u8         event_sub_type[0x8];
-
-	u8         reserved_at_20[0xe0];
-
-	union mlx5_ifc_event_auto_bits event_data;
-
-	u8         reserved_at_1e0[0x10];
-	u8         signature[0x8];
-	u8         reserved_at_1f8[0x7];
-	u8         owner[0x1];
-};
-
-enum {
-	MLX5_CMD_QUEUE_ENTRY_TYPE_PCIE_CMD_IF_TRANSPORT  = 0x7,
-};
-
-struct mlx5_ifc_cmd_queue_entry_bits {
-	u8         type[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         input_length[0x20];
-
-	u8         input_mailbox_pointer_63_32[0x20];
-
-	u8         input_mailbox_pointer_31_9[0x17];
-	u8         reserved_at_77[0x9];
-
-	u8         command_input_inline_data[16][0x8];
-
-	u8         command_output_inline_data[16][0x8];
-
-	u8         output_mailbox_pointer_63_32[0x20];
-
-	u8         output_mailbox_pointer_31_9[0x17];
-	u8         reserved_at_1b7[0x9];
-
-	u8         output_length[0x20];
-
-	u8         token[0x8];
-	u8         signature[0x8];
-	u8         reserved_at_1f0[0x8];
-	u8         status[0x7];
-	u8         ownership[0x1];
-};
-
-struct mlx5_ifc_cmd_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         command_output[0x20];
-};
-
-struct mlx5_ifc_cmd_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         command[0][0x20];
-};
-
-struct mlx5_ifc_cmd_if_box_bits {
-	u8         mailbox_data[512][0x8];
-
-	u8         reserved_at_1000[0x180];
-
-	u8         next_pointer_63_32[0x20];
-
-	u8         next_pointer_31_10[0x16];
-	u8         reserved_at_11b6[0xa];
-
-	u8         block_number[0x20];
-
-	u8         reserved_at_11e0[0x8];
-	u8         token[0x8];
-	u8         ctrl_signature[0x8];
-	u8         signature[0x8];
-};
-
-struct mlx5_ifc_mtt_bits {
-	u8         ptag_63_32[0x20];
-
-	u8         ptag_31_8[0x18];
-	u8         reserved_at_38[0x6];
-	u8         wr_en[0x1];
-	u8         rd_en[0x1];
-};
-
-struct mlx5_ifc_query_wol_rol_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x10];
-	u8         rol_mode[0x8];
-	u8         wol_mode[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_query_wol_rol_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_wol_rol_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_wol_rol_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         rol_mode_valid[0x1];
-	u8         wol_mode_valid[0x1];
-	u8         reserved_at_42[0xe];
-	u8         rol_mode[0x8];
-	u8         wol_mode[0x8];
-
-	u8         reserved_at_60[0x20];
-};
-
-enum {
-	MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER  = 0x0,
-	MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED     = 0x1,
-	MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC  = 0x2,
-};
-
-enum {
-	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_FULL_DRIVER  = 0x0,
-	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_DISABLED     = 0x1,
-	MLX5_INITIAL_SEG_NIC_INTERFACE_SUPPORTED_NO_DRAM_NIC  = 0x2,
-};
-
-enum {
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_INTERNAL_ERR              = 0x1,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_DEAD_IRISC                   = 0x7,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HW_FATAL_ERR                 = 0x8,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FW_CRC_ERR                   = 0x9,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_FETCH_PCI_ERR            = 0xa,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PAGE_ERR                 = 0xb,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_ASYNCHRONOUS_EQ_BUF_OVERRUN  = 0xc,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_IN_ERR                    = 0xd,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV                       = 0xe,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR                    = 0xf,
-	MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR                = 0x10,
-};
-
-struct mlx5_ifc_initial_seg_bits {
-	u8         fw_rev_minor[0x10];
-	u8         fw_rev_major[0x10];
-
-	u8         cmd_interface_rev[0x10];
-	u8         fw_rev_subminor[0x10];
-
-	u8         reserved_at_40[0x40];
-
-	u8         cmdq_phy_addr_63_32[0x20];
-
-	u8         cmdq_phy_addr_31_12[0x14];
-	u8         reserved_at_b4[0x2];
-	u8         nic_interface[0x2];
-	u8         log_cmdq_size[0x4];
-	u8         log_cmdq_stride[0x4];
-
-	u8         command_doorbell_vector[0x20];
-
-	u8         reserved_at_e0[0xf00];
-
-	u8         initializing[0x1];
-	u8         reserved_at_fe1[0x4];
-	u8         nic_interface_supported[0x3];
-	u8         reserved_at_fe8[0x18];
-
-	struct mlx5_ifc_health_buffer_bits health_buffer;
-
-	u8         no_dram_nic_offset[0x20];
-
-	u8         reserved_at_1220[0x6e40];
-
-	u8         reserved_at_8060[0x1f];
-	u8         clear_int[0x1];
-
-	u8         health_syndrome[0x8];
-	u8         health_counter[0x18];
-
-	u8         reserved_at_80a0[0x17fc0];
-};
-
-union mlx5_ifc_ports_control_registers_document_bits {
-	struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
-	struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits eth_2863_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits eth_3635_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits eth_802_3_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits eth_extended_cntrs_grp_data_layout;
-	struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
-	struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
-	struct mlx5_ifc_lane_2_module_mapping_bits lane_2_module_mapping;
-	struct mlx5_ifc_pamp_reg_bits pamp_reg;
-	struct mlx5_ifc_paos_reg_bits paos_reg;
-	struct mlx5_ifc_pcap_reg_bits pcap_reg;
-	struct mlx5_ifc_peir_reg_bits peir_reg;
-	struct mlx5_ifc_pelc_reg_bits pelc_reg;
-	struct mlx5_ifc_pfcc_reg_bits pfcc_reg;
-	struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
-	struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
-	struct mlx5_ifc_pifr_reg_bits pifr_reg;
-	struct mlx5_ifc_pipg_reg_bits pipg_reg;
-	struct mlx5_ifc_plbf_reg_bits plbf_reg;
-	struct mlx5_ifc_plib_reg_bits plib_reg;
-	struct mlx5_ifc_plpc_reg_bits plpc_reg;
-	struct mlx5_ifc_pmaos_reg_bits pmaos_reg;
-	struct mlx5_ifc_pmlp_reg_bits pmlp_reg;
-	struct mlx5_ifc_pmlpn_reg_bits pmlpn_reg;
-	struct mlx5_ifc_pmpc_reg_bits pmpc_reg;
-	struct mlx5_ifc_pmpe_reg_bits pmpe_reg;
-	struct mlx5_ifc_pmpr_reg_bits pmpr_reg;
-	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
-	struct mlx5_ifc_ppad_reg_bits ppad_reg;
-	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
-	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
-	struct mlx5_ifc_pplm_reg_bits pplm_reg;
-	struct mlx5_ifc_pplr_reg_bits pplr_reg;
-	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
-	struct mlx5_ifc_pqdr_reg_bits pqdr_reg;
-	struct mlx5_ifc_pspa_reg_bits pspa_reg;
-	struct mlx5_ifc_ptas_reg_bits ptas_reg;
-	struct mlx5_ifc_ptys_reg_bits ptys_reg;
-	struct mlx5_ifc_mlcr_reg_bits mlcr_reg;
-	struct mlx5_ifc_pude_reg_bits pude_reg;
-	struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
-	struct mlx5_ifc_slrg_reg_bits slrg_reg;
-	struct mlx5_ifc_sltp_reg_bits sltp_reg;
-	u8         reserved_at_0[0x60e0];
-};
-
-union mlx5_ifc_debug_enhancements_document_bits {
-	struct mlx5_ifc_health_buffer_bits health_buffer;
-	u8         reserved_at_0[0x200];
-};
-
-union mlx5_ifc_uplink_pci_interface_document_bits {
-	struct mlx5_ifc_initial_seg_bits initial_seg;
-	u8         reserved_at_0[0x20060];
-};
-
-struct mlx5_ifc_set_flow_table_root_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_set_flow_table_root_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x140];
-};
-
-enum {
-	MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID     = (1UL << 0),
-	MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
-};
-
-struct mlx5_ifc_modify_flow_table_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_flow_table_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         other_vport[0x1];
-	u8         reserved_at_41[0xf];
-	u8         vport_number[0x10];
-
-	u8         reserved_at_60[0x10];
-	u8         modify_field_select[0x10];
-
-	u8         table_type[0x8];
-	u8         reserved_at_88[0x18];
-
-	u8         reserved_at_a0[0x8];
-	u8         table_id[0x18];
-
-	u8         reserved_at_c0[0x4];
-	u8         table_miss_mode[0x4];
-	u8         reserved_at_c8[0x18];
-
-	u8         reserved_at_e0[0x8];
-	u8         table_miss_id[0x18];
-
-	u8         reserved_at_100[0x8];
-	u8         lag_master_next_table_id[0x18];
-
-	u8         reserved_at_120[0x80];
-};
-
-struct mlx5_ifc_ets_tcn_config_reg_bits {
-	u8         g[0x1];
-	u8         b[0x1];
-	u8         r[0x1];
-	u8         reserved_at_3[0x9];
-	u8         group[0x4];
-	u8         reserved_at_10[0x9];
-	u8         bw_allocation[0x7];
-
-	u8         reserved_at_20[0xc];
-	u8         max_bw_units[0x4];
-	u8         reserved_at_30[0x8];
-	u8         max_bw_value[0x8];
-};
-
-struct mlx5_ifc_ets_global_config_reg_bits {
-	u8         reserved_at_0[0x2];
-	u8         r[0x1];
-	u8         reserved_at_3[0x1d];
-
-	u8         reserved_at_20[0xc];
-	u8         max_bw_units[0x4];
-	u8         reserved_at_30[0x8];
-	u8         max_bw_value[0x8];
-};
-
-struct mlx5_ifc_qetc_reg_bits {
-	u8                                         reserved_at_0[0x8];
-	u8                                         port_number[0x8];
-	u8                                         reserved_at_10[0x30];
-
-	struct mlx5_ifc_ets_tcn_config_reg_bits    tc_configuration[0x8];
-	struct mlx5_ifc_ets_global_config_reg_bits global_configuration;
-};
-
-struct mlx5_ifc_qtct_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         port_number[0x8];
-	u8         reserved_at_10[0xd];
-	u8         prio[0x3];
-
-	u8         reserved_at_20[0x1d];
-	u8         tclass[0x3];
-};
-
-struct mlx5_ifc_mcia_reg_bits {
-	u8         l[0x1];
-	u8         reserved_at_1[0x7];
-	u8         module[0x8];
-	u8         reserved_at_10[0x8];
-	u8         status[0x8];
-
-	u8         i2c_device_address[0x8];
-	u8         page_number[0x8];
-	u8         device_address[0x10];
-
-	u8         reserved_at_40[0x10];
-	u8         size[0x10];
-
-	u8         reserved_at_60[0x20];
-
-	u8         dword_0[0x20];
-	u8         dword_1[0x20];
-	u8         dword_2[0x20];
-	u8         dword_3[0x20];
-	u8         dword_4[0x20];
-	u8         dword_5[0x20];
-	u8         dword_6[0x20];
-	u8         dword_7[0x20];
-	u8         dword_8[0x20];
-	u8         dword_9[0x20];
-	u8         dword_10[0x20];
-	u8         dword_11[0x20];
-};
-
-struct mlx5_ifc_dcbx_param_bits {
-	u8         dcbx_cee_cap[0x1];
-	u8         dcbx_ieee_cap[0x1];
-	u8         dcbx_standby_cap[0x1];
-	u8         reserved_at_0[0x5];
-	u8         port_number[0x8];
-	u8         reserved_at_10[0xa];
-	u8         max_application_table_size[6];
-	u8         reserved_at_20[0x15];
-	u8         version_oper[0x3];
-	u8         reserved_at_38[5];
-	u8         version_admin[0x3];
-	u8         willing_admin[0x1];
-	u8         reserved_at_41[0x3];
-	u8         pfc_cap_oper[0x4];
-	u8         reserved_at_48[0x4];
-	u8         pfc_cap_admin[0x4];
-	u8         reserved_at_50[0x4];
-	u8         num_of_tc_oper[0x4];
-	u8         reserved_at_58[0x4];
-	u8         num_of_tc_admin[0x4];
-	u8         remote_willing[0x1];
-	u8         reserved_at_61[3];
-	u8         remote_pfc_cap[4];
-	u8         reserved_at_68[0x14];
-	u8         remote_num_of_tc[0x4];
-	u8         reserved_at_80[0x18];
-	u8         error[0x8];
-	u8         reserved_at_a0[0x160];
-};
-
-struct mlx5_ifc_lagc_bits {
-	u8         reserved_at_0[0x1d];
-	u8         lag_state[0x3];
-
-	u8         reserved_at_20[0x14];
-	u8         tx_remap_affinity_2[0x4];
-	u8         reserved_at_38[0x4];
-	u8         tx_remap_affinity_1[0x4];
-};
-
-struct mlx5_ifc_create_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_create_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	struct mlx5_ifc_lagc_bits ctx;
-};
-
-struct mlx5_ifc_modify_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_modify_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x20];
-	u8         field_select[0x20];
-
-	struct mlx5_ifc_lagc_bits ctx;
-};
-
-struct mlx5_ifc_query_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-
-	struct mlx5_ifc_lagc_bits ctx;
-};
-
-struct mlx5_ifc_query_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_create_vport_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_create_vport_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_vport_lag_out_bits {
-	u8         status[0x8];
-	u8         reserved_at_8[0x18];
-
-	u8         syndrome[0x20];
-
-	u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_destroy_vport_lag_in_bits {
-	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
-
-	u8         reserved_at_20[0x10];
-	u8         op_mod[0x10];
-
-	u8         reserved_at_40[0x40];
-};
-
-#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
deleted file mode 100644
index e527732..0000000
--- a/include/linux/mlx5/port.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_PORT_H__
-#define __MLX5_PORT_H__
-
-#include <linux/mlx5/driver.h>
-
-enum mlx5_beacon_duration {
-	MLX5_BEACON_DURATION_OFF = 0x0,
-	MLX5_BEACON_DURATION_INF = 0xffff,
-};
-
-enum mlx5_module_id {
-	MLX5_MODULE_ID_SFP              = 0x3,
-	MLX5_MODULE_ID_QSFP             = 0xC,
-	MLX5_MODULE_ID_QSFP_PLUS        = 0xD,
-	MLX5_MODULE_ID_QSFP28           = 0x11,
-};
-
-enum mlx5_an_status {
-	MLX5_AN_UNAVAILABLE = 0,
-	MLX5_AN_COMPLETE    = 1,
-	MLX5_AN_FAILED      = 2,
-	MLX5_AN_LINK_UP     = 3,
-	MLX5_AN_LINK_DOWN   = 4,
-};
-
-#define MLX5_EEPROM_MAX_BYTES			32
-#define MLX5_EEPROM_IDENTIFIER_BYTE_MASK	0x000000ff
-#define MLX5_I2C_ADDR_LOW		0x50
-#define MLX5_I2C_ADDR_HIGH		0x51
-#define MLX5_EEPROM_PAGE_LENGTH		256
-
-enum mlx5e_link_mode {
-	MLX5E_1000BASE_CX_SGMII	 = 0,
-	MLX5E_1000BASE_KX	 = 1,
-	MLX5E_10GBASE_CX4	 = 2,
-	MLX5E_10GBASE_KX4	 = 3,
-	MLX5E_10GBASE_KR	 = 4,
-	MLX5E_20GBASE_KR2	 = 5,
-	MLX5E_40GBASE_CR4	 = 6,
-	MLX5E_40GBASE_KR4	 = 7,
-	MLX5E_56GBASE_R4	 = 8,
-	MLX5E_10GBASE_CR	 = 12,
-	MLX5E_10GBASE_SR	 = 13,
-	MLX5E_10GBASE_ER	 = 14,
-	MLX5E_40GBASE_SR4	 = 15,
-	MLX5E_40GBASE_LR4	 = 16,
-	MLX5E_50GBASE_SR2	 = 18,
-	MLX5E_100GBASE_CR4	 = 20,
-	MLX5E_100GBASE_SR4	 = 21,
-	MLX5E_100GBASE_KR4	 = 22,
-	MLX5E_100GBASE_LR4	 = 23,
-	MLX5E_100BASE_TX	 = 24,
-	MLX5E_1000BASE_T	 = 25,
-	MLX5E_10GBASE_T		 = 26,
-	MLX5E_25GBASE_CR	 = 27,
-	MLX5E_25GBASE_KR	 = 28,
-	MLX5E_25GBASE_SR	 = 29,
-	MLX5E_50GBASE_CR2	 = 30,
-	MLX5E_50GBASE_KR2	 = 31,
-	MLX5E_LINK_MODES_NUMBER,
-};
-
-#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
-
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK         0xF
-
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
-int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
-			 int ptys_size, int proto_mask, u8 local_port);
-int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
-			      u32 *proto_cap, int proto_mask);
-int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
-				u32 *proto_admin, int proto_mask);
-int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev,
-				    u8 *link_width_oper, u8 local_port);
-int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev,
-				  u8 *proto_oper, u8 local_port);
-int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev,
-				   u32 *proto_oper, u8 local_port);
-int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
-		       u32 proto_admin, int proto_mask);
-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
-			       enum mlx5_port_status status);
-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
-				 enum mlx5_port_status *status);
-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
-void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
-			     u8 *an_status,
-			     u8 *an_disable_cap, u8 *an_disable_admin);
-
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
-			      u8 port);
-
-int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
-			      u8 *vl_hw_cap, u8 local_port);
-
-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
-			  u32 *rx_pause, u32 *tx_pause);
-
-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
-			u8 *pfc_en_rx);
-
-int mlx5_max_tc(struct mlx5_core_dev *mdev);
-
-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
-			    u8 prio, u8 *tc);
-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
-				u8 tc, u8 *bw_pct);
-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
-				    u8 *max_bw_value,
-				    u8 *max_bw_unit);
-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
-				   u8 *max_bw_value,
-				   u8 *max_bw_unit);
-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
-
-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
-			 bool *enabled);
-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
-			     u16 offset, u16 size, u8 *data);
-
-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
-#endif /* __MLX5_PORT_H__ */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
deleted file mode 100644
index 219c699..0000000
--- a/include/linux/mlx5/qp.h
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_QP_H
-#define MLX5_QP_H
-
-#include <linux/mlx5/device.h>
-#include <linux/mlx5/driver.h>
-
-#define MLX5_INVALID_LKEY	0x100
-#define MLX5_SIG_WQE_SIZE	(MLX5_SEND_WQE_BB * 5)
-#define MLX5_DIF_SIZE		8
-#define MLX5_STRIDE_BLOCK_OP	0x400
-#define MLX5_CPY_GRD_MASK	0xc0
-#define MLX5_CPY_APP_MASK	0x30
-#define MLX5_CPY_REF_MASK	0x0f
-#define MLX5_BSF_INC_REFTAG	(1 << 6)
-#define MLX5_BSF_INL_VALID	(1 << 15)
-#define MLX5_BSF_REFRESH_DIF	(1 << 14)
-#define MLX5_BSF_REPEAT_BLOCK	(1 << 7)
-#define MLX5_BSF_APPTAG_ESCAPE	0x1
-#define MLX5_BSF_APPREF_ESCAPE	0x2
-
-enum mlx5_qp_optpar {
-	MLX5_QP_OPTPAR_ALT_ADDR_PATH		= 1 << 0,
-	MLX5_QP_OPTPAR_RRE			= 1 << 1,
-	MLX5_QP_OPTPAR_RAE			= 1 << 2,
-	MLX5_QP_OPTPAR_RWE			= 1 << 3,
-	MLX5_QP_OPTPAR_PKEY_INDEX		= 1 << 4,
-	MLX5_QP_OPTPAR_Q_KEY			= 1 << 5,
-	MLX5_QP_OPTPAR_RNR_TIMEOUT		= 1 << 6,
-	MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH	= 1 << 7,
-	MLX5_QP_OPTPAR_SRA_MAX			= 1 << 8,
-	MLX5_QP_OPTPAR_RRA_MAX			= 1 << 9,
-	MLX5_QP_OPTPAR_PM_STATE			= 1 << 10,
-	MLX5_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
-	MLX5_QP_OPTPAR_RNR_RETRY		= 1 << 13,
-	MLX5_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
-	MLX5_QP_OPTPAR_PRI_PORT			= 1 << 16,
-	MLX5_QP_OPTPAR_SRQN			= 1 << 18,
-	MLX5_QP_OPTPAR_CQN_RCV			= 1 << 19,
-	MLX5_QP_OPTPAR_DC_HS			= 1 << 20,
-	MLX5_QP_OPTPAR_DC_KEY			= 1 << 21,
-};
-
-enum mlx5_qp_state {
-	MLX5_QP_STATE_RST			= 0,
-	MLX5_QP_STATE_INIT			= 1,
-	MLX5_QP_STATE_RTR			= 2,
-	MLX5_QP_STATE_RTS			= 3,
-	MLX5_QP_STATE_SQER			= 4,
-	MLX5_QP_STATE_SQD			= 5,
-	MLX5_QP_STATE_ERR			= 6,
-	MLX5_QP_STATE_SQ_DRAINING		= 7,
-	MLX5_QP_STATE_SUSPENDED			= 9,
-	MLX5_QP_NUM_STATE,
-	MLX5_QP_STATE,
-	MLX5_QP_STATE_BAD,
-};
-
-enum {
-	MLX5_SQ_STATE_NA	= MLX5_SQC_STATE_ERR + 1,
-	MLX5_SQ_NUM_STATE	= MLX5_SQ_STATE_NA + 1,
-	MLX5_RQ_STATE_NA	= MLX5_RQC_STATE_ERR + 1,
-	MLX5_RQ_NUM_STATE	= MLX5_RQ_STATE_NA + 1,
-};
-
-enum {
-	MLX5_QP_ST_RC				= 0x0,
-	MLX5_QP_ST_UC				= 0x1,
-	MLX5_QP_ST_UD				= 0x2,
-	MLX5_QP_ST_XRC				= 0x3,
-	MLX5_QP_ST_MLX				= 0x4,
-	MLX5_QP_ST_DCI				= 0x5,
-	MLX5_QP_ST_DCT				= 0x6,
-	MLX5_QP_ST_QP0				= 0x7,
-	MLX5_QP_ST_QP1				= 0x8,
-	MLX5_QP_ST_RAW_ETHERTYPE		= 0x9,
-	MLX5_QP_ST_RAW_IPV6			= 0xa,
-	MLX5_QP_ST_SNIFFER			= 0xb,
-	MLX5_QP_ST_SYNC_UMR			= 0xe,
-	MLX5_QP_ST_PTP_1588			= 0xd,
-	MLX5_QP_ST_REG_UMR			= 0xc,
-	MLX5_QP_ST_MAX
-};
-
-enum {
-	MLX5_QP_PM_MIGRATED			= 0x3,
-	MLX5_QP_PM_ARMED			= 0x0,
-	MLX5_QP_PM_REARM			= 0x1
-};
-
-enum {
-	MLX5_NON_ZERO_RQ	= 0x0,
-	MLX5_SRQ_RQ		= 0x1,
-	MLX5_CRQ_RQ		= 0x2,
-	MLX5_ZERO_LEN_RQ	= 0x3
-};
-
-/* TODO REM */
-enum {
-	/* params1 */
-	MLX5_QP_BIT_SRE				= 1 << 15,
-	MLX5_QP_BIT_SWE				= 1 << 14,
-	MLX5_QP_BIT_SAE				= 1 << 13,
-	/* params2 */
-	MLX5_QP_BIT_RRE				= 1 << 15,
-	MLX5_QP_BIT_RWE				= 1 << 14,
-	MLX5_QP_BIT_RAE				= 1 << 13,
-	MLX5_QP_BIT_RIC				= 1 <<	4,
-	MLX5_QP_BIT_CC_SLAVE_RECV		= 1 <<  2,
-	MLX5_QP_BIT_CC_SLAVE_SEND		= 1 <<  1,
-	MLX5_QP_BIT_CC_MASTER			= 1 <<  0
-};
-
-enum {
-	MLX5_WQE_CTRL_CQ_UPDATE		= 2 << 2,
-	MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE	= 3 << 2,
-	MLX5_WQE_CTRL_SOLICITED		= 1 << 1,
-};
-
-enum {
-	MLX5_SEND_WQE_DS	= 16,
-	MLX5_SEND_WQE_BB	= 64,
-};
-
-#define MLX5_SEND_WQEBB_NUM_DS	(MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
-
-enum {
-	MLX5_SEND_WQE_MAX_WQEBBS	= 16,
-};
-
-enum {
-	MLX5_WQE_FMR_PERM_LOCAL_READ	= 1 << 27,
-	MLX5_WQE_FMR_PERM_LOCAL_WRITE	= 1 << 28,
-	MLX5_WQE_FMR_PERM_REMOTE_READ	= 1 << 29,
-	MLX5_WQE_FMR_PERM_REMOTE_WRITE	= 1 << 30,
-	MLX5_WQE_FMR_PERM_ATOMIC	= 1 << 31
-};
-
-enum {
-	MLX5_FENCE_MODE_NONE			= 0 << 5,
-	MLX5_FENCE_MODE_INITIATOR_SMALL		= 1 << 5,
-	MLX5_FENCE_MODE_FENCE			= 2 << 5,
-	MLX5_FENCE_MODE_STRONG_ORDERING		= 3 << 5,
-	MLX5_FENCE_MODE_SMALL_AND_FENCE		= 4 << 5,
-};
-
-enum {
-	MLX5_RCV_DBR	= 0,
-	MLX5_SND_DBR	= 1,
-};
-
-enum {
-	MLX5_FLAGS_INLINE	= 1<<7,
-	MLX5_FLAGS_CHECK_FREE   = 1<<5,
-};
-
-struct mlx5_wqe_fmr_seg {
-	__be32			flags;
-	__be32			mem_key;
-	__be64			buf_list;
-	__be64			start_addr;
-	__be64			reg_len;
-	__be32			offset;
-	__be32			page_size;
-	u32			reserved[2];
-};
-
-struct mlx5_wqe_ctrl_seg {
-	__be32			opmod_idx_opcode;
-	__be32			qpn_ds;
-	u8			signature;
-	u8			rsvd[2];
-	u8			fm_ce_se;
-	__be32			imm;
-};
-
-#define MLX5_WQE_CTRL_DS_MASK 0x3f
-#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
-#define MLX5_WQE_CTRL_QPN_SHIFT 8
-#define MLX5_WQE_DS_UNITS 16
-#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
-#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
-#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
-#define MLX5_WQE_AV_EXT 0x80000000
-
-enum {
-	MLX5_ETH_WQE_L3_INNER_CSUM      = 1 << 4,
-	MLX5_ETH_WQE_L4_INNER_CSUM      = 1 << 5,
-	MLX5_ETH_WQE_L3_CSUM            = 1 << 6,
-	MLX5_ETH_WQE_L4_CSUM            = 1 << 7,
-};
-
-struct mlx5_wqe_eth_seg {
-	u8              rsvd0[4];
-	u8              cs_flags;
-	u8              rsvd1;
-	__be16          mss;
-	__be32          rsvd2;
-	__be16          inline_hdr_sz;
-	u8              inline_hdr_start[2];
-};
-
-struct mlx5_wqe_xrc_seg {
-	__be32			xrc_srqn;
-	u8			rsvd[12];
-};
-
-struct mlx5_wqe_masked_atomic_seg {
-	__be64			swap_add;
-	__be64			compare;
-	__be64			swap_add_mask;
-	__be64			compare_mask;
-};
-
-struct mlx5_base_av {
-	union {
-		struct {
-			__be32	qkey;
-			__be32	reserved;
-		} qkey;
-		__be64	dc_key;
-	} key;
-	__be32	dqp_dct;
-	u8	stat_rate_sl;
-	u8	fl_mlid;
-	union {
-		__be16	rlid;
-		__be16  udp_sport;
-	};
-};
-
-struct mlx5_av {
-	union {
-		struct {
-			__be32	qkey;
-			__be32	reserved;
-		} qkey;
-		__be64	dc_key;
-	} key;
-	__be32	dqp_dct;
-	u8	stat_rate_sl;
-	u8	fl_mlid;
-	union {
-		__be16	rlid;
-		__be16  udp_sport;
-	};
-	u8	reserved0[4];
-	u8	rmac[6];
-	u8	tclass;
-	u8	hop_limit;
-	__be32	grh_gid_fl;
-	u8	rgid[16];
-};
-
-struct mlx5_wqe_datagram_seg {
-	struct mlx5_av	av;
-};
-
-struct mlx5_wqe_raddr_seg {
-	__be64			raddr;
-	__be32			rkey;
-	u32			reserved;
-};
-
-struct mlx5_wqe_atomic_seg {
-	__be64			swap_add;
-	__be64			compare;
-};
-
-struct mlx5_wqe_data_seg {
-	__be32			byte_count;
-	__be32			lkey;
-	__be64			addr;
-};
-
-struct mlx5_wqe_umr_ctrl_seg {
-	u8		flags;
-	u8		rsvd0[3];
-	__be16		xlt_octowords;
-	union {
-		__be16	xlt_offset;
-		__be16	bsf_octowords;
-	};
-	__be64		mkey_mask;
-	__be32		xlt_offset_47_16;
-	u8		rsvd1[28];
-};
-
-struct mlx5_seg_set_psv {
-	__be32		psv_num;
-	__be16		syndrome;
-	__be16		status;
-	__be32		transient_sig;
-	__be32		ref_tag;
-};
-
-struct mlx5_seg_get_psv {
-	u8		rsvd[19];
-	u8		num_psv;
-	__be32		l_key;
-	__be64		va;
-	__be32		psv_index[4];
-};
-
-struct mlx5_seg_check_psv {
-	u8		rsvd0[2];
-	__be16		err_coalescing_op;
-	u8		rsvd1[2];
-	__be16		xport_err_op;
-	u8		rsvd2[2];
-	__be16		xport_err_mask;
-	u8		rsvd3[7];
-	u8		num_psv;
-	__be32		l_key;
-	__be64		va;
-	__be32		psv_index[4];
-};
-
-struct mlx5_rwqe_sig {
-	u8	rsvd0[4];
-	u8	signature;
-	u8	rsvd1[11];
-};
-
-struct mlx5_wqe_signature_seg {
-	u8	rsvd0[4];
-	u8	signature;
-	u8	rsvd1[11];
-};
-
-#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
-
-struct mlx5_wqe_inline_seg {
-	__be32	byte_count;
-};
-
-enum mlx5_sig_type {
-	MLX5_DIF_CRC = 0x1,
-	MLX5_DIF_IPCS = 0x2,
-};
-
-struct mlx5_bsf_inl {
-	__be16		vld_refresh;
-	__be16		dif_apptag;
-	__be32		dif_reftag;
-	u8		sig_type;
-	u8		rp_inv_seed;
-	u8		rsvd[3];
-	u8		dif_inc_ref_guard_check;
-	__be16		dif_app_bitmask_check;
-};
-
-struct mlx5_bsf {
-	struct mlx5_bsf_basic {
-		u8		bsf_size_sbs;
-		u8		check_byte_mask;
-		union {
-			u8	copy_byte_mask;
-			u8	bs_selector;
-			u8	rsvd_wflags;
-		} wire;
-		union {
-			u8	bs_selector;
-			u8	rsvd_mflags;
-		} mem;
-		__be32		raw_data_size;
-		__be32		w_bfs_psv;
-		__be32		m_bfs_psv;
-	} basic;
-	struct mlx5_bsf_ext {
-		__be32		t_init_gen_pro_size;
-		__be32		rsvd_epi_size;
-		__be32		w_tfs_psv;
-		__be32		m_tfs_psv;
-	} ext;
-	struct mlx5_bsf_inl	w_inl;
-	struct mlx5_bsf_inl	m_inl;
-};
-
-struct mlx5_mtt {
-	__be64		ptag;
-};
-
-struct mlx5_klm {
-	__be32		bcount;
-	__be32		key;
-	__be64		va;
-};
-
-struct mlx5_stride_block_entry {
-	__be16		stride;
-	__be16		bcount;
-	__be32		key;
-	__be64		va;
-};
-
-struct mlx5_stride_block_ctrl_seg {
-	__be32		bcount_per_cycle;
-	__be32		op;
-	__be32		repeat_count;
-	u16		rsvd;
-	__be16		num_entries;
-};
-
-struct mlx5_core_qp {
-	struct mlx5_core_rsc_common	common; /* must be first */
-	void (*event)		(struct mlx5_core_qp *, int);
-	int			qpn;
-	struct mlx5_rsc_debug	*dbg;
-	int			pid;
-};
-
-struct mlx5_qp_path {
-	u8			fl_free_ar;
-	u8			rsvd3;
-	__be16			pkey_index;
-	u8			rsvd0;
-	u8			grh_mlid;
-	__be16			rlid;
-	u8			ackto_lt;
-	u8			mgid_index;
-	u8			static_rate;
-	u8			hop_limit;
-	__be32			tclass_flowlabel;
-	union {
-		u8		rgid[16];
-		u8		rip[16];
-	};
-	u8			f_dscp_ecn_prio;
-	u8			ecn_dscp;
-	__be16			udp_sport;
-	u8			dci_cfi_prio_sl;
-	u8			port;
-	u8			rmac[6];
-};
-
-/* FIXME: use mlx5_ifc.h qpc */
-struct mlx5_qp_context {
-	__be32			flags;
-	__be32			flags_pd;
-	u8			mtu_msgmax;
-	u8			rq_size_stride;
-	__be16			sq_crq_size;
-	__be32			qp_counter_set_usr_page;
-	__be32			wire_qpn;
-	__be32			log_pg_sz_remote_qpn;
-	struct			mlx5_qp_path pri_path;
-	struct			mlx5_qp_path alt_path;
-	__be32			params1;
-	u8			reserved2[4];
-	__be32			next_send_psn;
-	__be32			cqn_send;
-	__be32			deth_sqpn;
-	u8			reserved3[4];
-	__be32			last_acked_psn;
-	__be32			ssn;
-	__be32			params2;
-	__be32			rnr_nextrecvpsn;
-	__be32			xrcd;
-	__be32			cqn_recv;
-	__be64			db_rec_addr;
-	__be32			qkey;
-	__be32			rq_type_srqn;
-	__be32			rmsn;
-	__be16			hw_sq_wqe_counter;
-	__be16			sw_sq_wqe_counter;
-	__be16			hw_rcyclic_byte_counter;
-	__be16			hw_rq_counter;
-	__be16			sw_rcyclic_byte_counter;
-	__be16			sw_rq_counter;
-	u8			rsvd0[5];
-	u8			cgs;
-	u8			cs_req;
-	u8			cs_res;
-	__be64			dc_access_key;
-	u8			rsvd1[24];
-};
-
-static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
-{
-	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
-}
-
-static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u32 key)
-{
-	return radix_tree_lookup(&dev->priv.mkey_table.tree, key);
-}
-
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
-			struct mlx5_core_qp *qp,
-			u32 *in,
-			int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
-			u32 opt_param_mask, void *qpc,
-			struct mlx5_core_qp *qp);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
-			 struct mlx5_core_qp *qp);
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-		       u32 *out, int outlen);
-
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
-int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *rq);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *rq);
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *sq);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *sq);
-int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id);
-int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id);
-int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
-			      int reset, void *out, int out_size);
-int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
-				  u32 *out_of_buffer);
-
-static inline const char *mlx5_qp_type_str(int type)
-{
-	switch (type) {
-	case MLX5_QP_ST_RC: return "RC";
-	case MLX5_QP_ST_UC: return "C";
-	case MLX5_QP_ST_UD: return "UD";
-	case MLX5_QP_ST_XRC: return "XRC";
-	case MLX5_QP_ST_MLX: return "MLX";
-	case MLX5_QP_ST_QP0: return "QP0";
-	case MLX5_QP_ST_QP1: return "QP1";
-	case MLX5_QP_ST_RAW_ETHERTYPE: return "RAW_ETHERTYPE";
-	case MLX5_QP_ST_RAW_IPV6: return "RAW_IPV6";
-	case MLX5_QP_ST_SNIFFER: return "SNIFFER";
-	case MLX5_QP_ST_SYNC_UMR: return "SYNC_UMR";
-	case MLX5_QP_ST_PTP_1588: return "PTP_1588";
-	case MLX5_QP_ST_REG_UMR: return "REG_UMR";
-	default: return "Invalid transport type";
-	}
-}
-
-static inline const char *mlx5_qp_state_str(int state)
-{
-	switch (state) {
-	case MLX5_QP_STATE_RST:
-	return "RST";
-	case MLX5_QP_STATE_INIT:
-	return "INIT";
-	case MLX5_QP_STATE_RTR:
-	return "RTR";
-	case MLX5_QP_STATE_RTS:
-	return "RTS";
-	case MLX5_QP_STATE_SQER:
-	return "SQER";
-	case MLX5_QP_STATE_SQD:
-	return "SQD";
-	case MLX5_QP_STATE_ERR:
-	return "ERR";
-	case MLX5_QP_STATE_SQ_DRAINING:
-	return "SQ_DRAINING";
-	case MLX5_QP_STATE_SUSPENDED:
-	return "SUSPENDED";
-	default: return "Invalid QP state";
-	}
-}
-
-#endif /* MLX5_QP_H */
diff --git a/include/linux/mlx5/srq.h b/include/linux/mlx5/srq.h
deleted file mode 100644
index 1cde0fd..0000000
--- a/include/linux/mlx5/srq.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef MLX5_SRQ_H
-#define MLX5_SRQ_H
-
-#include <linux/mlx5/driver.h>
-
-enum {
-	MLX5_SRQ_FLAG_ERR    = (1 << 0),
-	MLX5_SRQ_FLAG_WQ_SIG = (1 << 1),
-};
-
-struct mlx5_srq_attr {
-	u32 type;
-	u32 flags;
-	u32 log_size;
-	u32 wqe_shift;
-	u32 log_page_size;
-	u32 wqe_cnt;
-	u32 srqn;
-	u32 xrcd;
-	u32 page_offset;
-	u32 cqn;
-	u32 pd;
-	u32 lwm;
-	u32 user_index;
-	u64 db_record;
-	__be64 *pas;
-};
-
-struct mlx5_core_dev;
-
-void mlx5_init_srq_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev);
-
-#endif /* MLX5_SRQ_H */
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
deleted file mode 100644
index 88441f5..0000000
--- a/include/linux/mlx5/transobj.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __TRANSOBJ_H__
-#define __TRANSOBJ_H__
-
-#include <linux/mlx5/driver.h>
-
-int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
-void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
-int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			u32 *rqn);
-int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
-void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
-int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
-int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			u32 *sqn);
-int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
-void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
-int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
-int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *tirn);
-int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
-			 int inlen);
-void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
-int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *tisn);
-int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
-			 int inlen);
-void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
-int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *rmpn);
-int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
-int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			  u32 *rmpn);
-int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
-int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
-int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
-
-int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
-			 u32 *rqtn);
-int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
-			 int inlen);
-void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
-
-#endif /* __TRANSOBJ_H__ */
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
deleted file mode 100644
index ec35157..0000000
--- a/include/linux/mlx5/vport.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __MLX5_VPORT_H__
-#define __MLX5_VPORT_H__
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/device.h>
-
-enum {
-	MLX5_CAP_INLINE_MODE_L2,
-	MLX5_CAP_INLINE_MODE_VPORT_CONTEXT,
-	MLX5_CAP_INLINE_MODE_NOT_REQUIRED,
-};
-
-u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
-				u16 vport);
-int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
-				  u16 vport, u8 state);
-int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 *addr);
-int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				    u16 vport, u8 *min_inline);
-int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
-				     u16 vport, u8 min_inline);
-int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
-				      u16 vport, u8 *addr);
-int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
-int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
-int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
-					   u64 *system_image_guid);
-int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
-int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
-				    u32 vport, u64 node_guid);
-int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
-					u16 *qkey_viol_cntr);
-int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
-			     u8 port_num, u16  vf_num, u16 gid_index,
-			     union ib_gid *gid);
-int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
-			      u8 port_num, u16 vf_num, u16 pkey_index,
-			      u16 *pkey);
-int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
-				 u8 other_vport, u8 port_num,
-				 u16 vf_num,
-				 struct mlx5_hca_vport_context *rep);
-int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
-					   u64 *sys_image_guid);
-int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
-				   u64 *node_guid);
-int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
-				  u32 vport,
-				  enum mlx5_list_type list_type,
-				  u8 addr_list[][ETH_ALEN],
-				  int *list_size);
-int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
-				   enum mlx5_list_type list_type,
-				   u8 addr_list[][ETH_ALEN],
-				   int list_size);
-int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
-				 u32 vport,
-				 int *promisc_uc,
-				 int *promisc_mc,
-				 int *promisc_all);
-int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
-				  int promisc_uc,
-				  int promisc_mc,
-				  int promisc_all);
-int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
-			       u32 vport,
-			       u16 vlans[],
-			       int *size);
-int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
-				u16 vlans[],
-				int list_size);
-
-int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
-int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
-int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
-				  int vf, u8 port_num, void *out,
-				  size_t out_sz);
-int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
-				       u8 other_vport, u8 port_num,
-				       int vf,
-				       struct mlx5_hca_vport_context *req);
-
-#endif /* __MLX5_VPORT_H__ */
-- 
2.7.4
