Message-ID: <1462364478-10808-15-git-send-email-oulijun@huawei.com>
Date:	Wed, 4 May 2016 20:21:11 +0800
From:	Lijun Ou <oulijun@...wei.com>
To:	<dledford@...hat.com>, <sean.hefty@...el.com>,
	<hal.rosenstock@...il.com>, <davem@...emloft.net>,
	<jeffrey.t.kirsher@...el.com>, <jiri@...lanox.com>,
	<ogerlitz@...lanox.com>
CC:	<linux-rdma@...r.kernel.org>, <linux-kernel@...r.kernel.org>,
	<netdev@...r.kernel.org>, <gongyangming@...wei.com>,
	<xiaokun@...wei.com>, <tangchaofei@...wei.com>,
	<oulijun@...wei.com>, <haifeng.wei@...wei.com>,
	<yisen.zhuang@...wei.com>, <yankejian@...wei.com>,
	<charles.chenxin@...wei.com>, <linuxarm@...wei.com>
Subject: [PATCH v7 14/21] IB/hns: Add operations support for IB device and port

This patch mainly registers the relevant verbs with the IB core. These
operations are invoked on behalf of userspace, for example:
    1. modify_device
    2. query_device
    3. query_port
    4. modify_port
and so on (a userspace sketch follows the '---' separator below).

Signed-off-by: Wei Hu <xavier.huwei@...wei.com>
Signed-off-by: Nenglong Zhao <zhaonenglong@...ilicon.com>
Signed-off-by: Lijun Ou <oulijun@...wei.com>
---
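
For reviewers' reference, a minimal userspace sketch (assuming libibverbs,
and that the first listed device is the hns RoCE device; port 1 is an
illustrative choice) of how the query_device/query_port verbs registered by
this patch are reached:

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list;
	struct ibv_context *ctx;
	struct ibv_device_attr dev_attr;
	struct ibv_port_attr port_attr;
	int num_devices;

	dev_list = ibv_get_device_list(&num_devices);
	if (!dev_list || num_devices == 0)
		return 1;

	/* Open the first device (assumed here to be the hns RoCE device). */
	ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;

	/* Dispatched by the uverbs layer to ib_dev->query_device(). */
	if (ibv_query_device(ctx, &dev_attr) == 0)
		printf("max_qp=%d max_cqe=%d\n", dev_attr.max_qp, dev_attr.max_cqe);

	/* Dispatched to ib_dev->query_port() for port 1. */
	if (ibv_query_port(ctx, 1, &port_attr) == 0)
		printf("state=%d active_mtu=%d\n",
		       port_attr.state, port_attr.active_mtu);

	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}

Link with -libverbs.
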
 drivers/infiniband/hw/hns/hns_roce_common.h |   4 +
 drivers/infiniband/hw/hns/hns_roce_device.h |  21 +++
 drivers/infiniband/hw/hns/hns_roce_main.c   | 228 ++++++++++++++++++++++++++++
 drivers/infiniband/hw/hns/hns_roce_user.h   |  17 +++
 4 files changed, 270 insertions(+)
 create mode 100644 drivers/infiniband/hw/hns/hns_roce_user.h

diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index 45b5804..135203e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -10,6 +10,10 @@
 #ifndef _HNS_ROCE_COMMON_H
 #define _HNS_ROCE_COMMON_H
 
+#ifndef assert
+#define assert(cond)
+#endif
+
 #define roce_writel(value, addr)     writel((value), (addr))
 #define roce_readl(addr)            readl((addr))
 #define roce_raw_write(value, addr) \
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 85ee147..d135e7f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -22,6 +22,8 @@
 #define DRV_NAME "hns_roce"
 
 #define MAC_ADDR_OCTET_NUM			6
+#define HNS_ROCE_MAX_MSG_LEN			0x80000000
+
 
 #define HNS_ROCE_BA_SIZE			(32 * 4096)
 
@@ -35,7 +37,10 @@
 
 #define HNS_ROCE_MAX_PORTS			6
 #define HNS_ROCE_MAX_GID_NUM			16
+#define HNS_ROCE_GID_SIZE			16
 
+#define PKEY_ID					0xffff
+#define NODE_DESC_SIZE				64
 #define ADDR_SHIFT_12				12
 #define ADDR_SHIFT_32				32
 #define ADDR_SHIFT_44				44
@@ -106,6 +111,11 @@ struct hns_roce_uar {
 	unsigned long	index;
 };
 
+struct hns_roce_ucontext {
+	struct ib_ucontext	ibucontext;
+	struct hns_roce_uar	uar;
+};
+
 struct hns_roce_bitmap {
 	/* Bitmap Traversal last a bit which is 1 */
 	unsigned long		last;
@@ -363,6 +373,17 @@ struct hns_roce_dev {
 	struct hns_roce_hw	*hw;
 };
 
+static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
+{
+	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
+}
+
+static inline struct hns_roce_ucontext
+			*to_hr_ucontext(struct ib_ucontext *ibucontext)
+{
+	return container_of(ibucontext, struct hns_roce_ucontext, ibucontext);
+}
+
 static inline void hns_roce_write64_k(__be32 val[2], void __iomem *dest)
 {
 	__raw_writeq(*(u64 *) val, dest);
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 9f7ad61..6a1fd38 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -41,6 +41,7 @@
 #include <rdma/ib_verbs.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
+#include "hns_roce_user.h"
 #include "hns_roce_icm.h"
 
 /**
@@ -341,6 +342,217 @@ int hns_roce_setup_mtu_gids(struct hns_roce_dev  *hr_dev)
 	return ret;
 }
 
+static int hns_roce_query_device(struct ib_device *ib_dev,
+				 struct ib_device_attr *props,
+				 struct ib_udata *uhw)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+	memset(props, 0, sizeof(*props));
+
+	props->fw_ver = hr_dev->fw_ver;
+	props->sys_image_guid = hr_dev->sys_image_guid;
+	props->max_mr_size = (u64)(~(0ULL));
+	props->page_size_cap = hr_dev->caps.page_size_cap;
+	props->vendor_id = hr_dev->vendor_id;
+	props->vendor_part_id = hr_dev->vendor_part_id;
+	props->hw_ver = hr_dev->hw_rev;
+	props->max_qp = hr_dev->caps.num_qps;
+	props->max_qp_wr = hr_dev->caps.max_wqes;
+	props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
+				  IB_DEVICE_RC_RNR_NAK_GEN |
+				  IB_DEVICE_LOCAL_DMA_LKEY;
+	props->max_sge = hr_dev->caps.max_sq_sg;
+	props->max_sge_rd = 1;
+	props->max_cq = hr_dev->caps.num_cqs;
+	props->max_cqe = hr_dev->caps.max_cqes;
+	props->max_mr = hr_dev->caps.num_mtpts;
+	props->max_pd = hr_dev->caps.num_pds;
+	props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
+	props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
+	props->atomic_cap = IB_ATOMIC_NONE;
+	props->max_pkeys = 1;
+	props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
+
+	return 0;
+}
+
+static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
+			       struct ib_port_attr *props)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct device *dev = &hr_dev->pdev->dev;
+	struct net_device *net_dev;
+	unsigned long flags;
+	enum ib_mtu mtu;
+	u8 port;
+
+	assert(port_num > 0);
+	port = port_num - 1;
+
+	memset(props, 0, sizeof(*props));
+
+	props->max_mtu = hr_dev->caps.max_mtu;
+	props->gid_tbl_len = hr_dev->caps.gid_table_len[port];
+	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+				IB_PORT_VENDOR_CLASS_SUP |
+				IB_PORT_BOOT_MGMT_SUP;
+	props->max_msg_sz = HNS_ROCE_MAX_MSG_LEN;
+	props->pkey_tbl_len = 1;
+	props->active_width = IB_WIDTH_4X;
+	props->active_speed = 1;
+
+	spin_lock_irqsave(&hr_dev->iboe.lock, flags);
+
+	net_dev = hr_dev->iboe.netdevs[port];
+	if (!net_dev) {
+		spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+		dev_err(dev, "find netdev %d failed!\r\n", port);
+		return -EINVAL;
+	}
+
+	mtu = iboe_get_mtu(net_dev->mtu);
+	props->active_mtu = mtu ? min(props->max_mtu, mtu) : IB_MTU_256;
+	props->state = (netif_running(net_dev) && netif_carrier_ok(net_dev)) ?
+			IB_PORT_ACTIVE : IB_PORT_DOWN;
+	props->phys_state = (props->state == IB_PORT_ACTIVE) ? 5 : 3;
+
+	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
+
+	return 0;
+}
+
+static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
+						    u8 port_num)
+{
+	return IB_LINK_LAYER_ETHERNET;
+}
+
+static int hns_roce_query_gid(struct ib_device *ib_dev, u8 port_num, int index,
+			      union ib_gid *gid)
+{
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+	struct device *dev = &hr_dev->pdev->dev;
+	u8 gid_idx = 0;
+	u8 port;
+
+	if (port_num < 1 || port_num > hr_dev->caps.num_ports ||
+	    index >= hr_dev->caps.gid_table_len[port_num - 1]) {
+		dev_err(dev,
+			"port_num %d index %d illegal! correct range: port_num 1~%d index 0~%d!\n",
+			port_num, index, hr_dev->caps.num_ports,
+			hr_dev->caps.gid_table_len[port_num - 1] - 1);
+		return -EINVAL;
+	}
+
+	port = port_num - 1;
+	gid_idx = hns_get_gid_index(hr_dev, port, index);
+	if (gid_idx >= HNS_ROCE_MAX_GID_NUM) {
+		dev_err(dev, "port_num %d index %d illegal! total gid num %d!\n",
+			port_num, index, HNS_ROCE_MAX_GID_NUM);
+		return -EINVAL;
+	}
+
+	memcpy(gid->raw, hr_dev->iboe.gid_table[gid_idx].raw,
+	       HNS_ROCE_GID_SIZE);
+
+	return 0;
+}
+
+static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
+			       u16 *pkey)
+{
+	*pkey = PKEY_ID;
+
+	return 0;
+}
+
+static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
+				  struct ib_device_modify *props)
+{
+	unsigned long flags;
+
+	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
+		return -EOPNOTSUPP;
+
+	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
+		spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
+		memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
+		spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
+	}
+
+	return 0;
+}
+
+static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
+				struct ib_port_modify *props)
+{
+	return 0;
+}
+
+static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
+						   struct ib_udata *udata)
+{
+	int ret = 0;
+	struct hns_roce_ucontext *context;
+	struct hns_roce_ib_alloc_ucontext_resp resp;
+	struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+
+	resp.qp_tab_size = hr_dev->caps.num_qps;
+
+	context = kmalloc(sizeof(*context), GFP_KERNEL);
+	if (!context)
+		return ERR_PTR(-ENOMEM);
+
+	ret = hns_roce_uar_alloc(hr_dev, &context->uar);
+	if (ret)
+		goto _error_fail_uar_alloc;
+
+	ret = ib_copy_to_udata(udata, &resp, sizeof(resp));
+	if (ret)
+		goto _error_fail_copy_to_udata;
+
+	return &context->ibucontext;
+
+_error_fail_copy_to_udata:
+	hns_roce_uar_free(hr_dev, &context->uar);
+
+_error_fail_uar_alloc:
+	kfree(context);
+
+	return ERR_PTR(ret);
+}
+
+static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
+{
+	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+
+	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+	kfree(context);
+
+	return 0;
+}
+
+static int hns_roce_mmap(struct ib_ucontext *context,
+			 struct vm_area_struct *vma)
+{
+	if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
+		return -EINVAL;
+
+	if (vma->vm_pgoff == 0) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       to_hr_ucontext(context)->uar.pfn,
+				       PAGE_SIZE, vma->vm_page_prot))
+			return -EAGAIN;
+
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
 {
 	struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
@@ -370,6 +582,22 @@ int hns_roce_register_device(struct hns_roce_dev *hr_dev)
 	ib_dev->local_dma_lkey		= hr_dev->caps.reserved_lkey;
 	ib_dev->num_comp_vectors	= hr_dev->caps.num_comp_vectors;
 	ib_dev->uverbs_abi_ver		= 1;
+	ib_dev->uverbs_cmd_mask		=
+		(1ULL << IB_USER_VERBS_CMD_GET_CONTEXT) |
+		(1ULL << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+		(1ULL << IB_USER_VERBS_CMD_QUERY_PORT);
+
+	/* HCA, device and port verbs */
+	ib_dev->modify_device		= hns_roce_modify_device;
+	ib_dev->query_device		= hns_roce_query_device;
+	ib_dev->query_port		= hns_roce_query_port;
+	ib_dev->modify_port		= hns_roce_modify_port;
+	ib_dev->get_link_layer		= hns_roce_get_link_layer;
+	ib_dev->query_gid		= hns_roce_query_gid;
+	ib_dev->query_pkey		= hns_roce_query_pkey;
+	ib_dev->alloc_ucontext		= hns_roce_alloc_ucontext;
+	ib_dev->dealloc_ucontext	= hns_roce_dealloc_ucontext;
+	ib_dev->mmap			= hns_roce_mmap;
 
 	ret = ib_register_device(ib_dev, NULL);
 	if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_user.h b/drivers/infiniband/hw/hns/hns_roce_user.h
new file mode 100644
index 0000000..457e77a
--- /dev/null
+++ b/drivers/infiniband/hw/hns/hns_roce_user.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _HNS_ROCE_USER_H
+#define _HNS_ROCE_USER_H
+
+struct hns_roce_ib_alloc_ucontext_resp {
+	__u32	qp_tab_size;
+};
+
+#endif /*_HNS_ROCE_USER_H */
-- 
1.9.1
