[PATCH v2 07/11] uio_hv_generic: create send and receive buffers

Stephen Hemminger stephen at networkplumber.org
Tue Jan 9 18:29:10 UTC 2018


Map in receive and send buffers for networking in the UIO device.
These buffers are special and need to be set up by kernel
APIs.

Signed-off-by: Stephen Hemminger <sthemmin at microsoft.com>
---
 Documentation/driver-api/uio-howto.rst |   2 +
 drivers/uio/uio_hv_generic.c           | 114 +++++++++++++++++++++++++++++++--
 2 files changed, 111 insertions(+), 5 deletions(-)

diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
index 272795951565..a4228e371244 100644
--- a/Documentation/driver-api/uio-howto.rst
+++ b/Documentation/driver-api/uio-howto.rst
@@ -702,6 +702,8 @@ The vmbus device regions are mapped into uio device resources.
     0) Channel ring buffers: guest to host and host to guest
     1) Guest to host interrupt signalling pages
     2) Guest to host monitor page
+    3) Network receive buffer region
+    4) Network send buffer region
 
 Further information
 ===================
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 24f8689fdb14..0d945eb6efbb 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -35,7 +35,7 @@
 
 #include "../hv/hyperv_vmbus.h"
 
-#define DRIVER_VERSION	"0.02.0"
+#define DRIVER_VERSION	"0.02.1"
 #define DRIVER_AUTHOR	"Stephen Hemminger <sthemmin at microsoft.com>"
 #define DRIVER_DESC	"Generic UIO driver for VMBus devices"
 
@@ -46,19 +46,38 @@ static unsigned int ring_size = 128;
 module_param(ring_size, uint, 0444);
 MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
 
-/*
- * List of resources to be mapped to user space
- * can be extended up to MAX_UIO_MAPS(5) items
- */
+#define RECV_BUFFER_MAX ((16 * 1024 * 1024) / PAGE_SIZE)
+
+static unsigned int recv_buffer_size = RECV_BUFFER_MAX;
+module_param(recv_buffer_size, uint, 0444);
+MODULE_PARM_DESC(recv_buffer_size, "Receive buffer size (# of pages)");
+
+#define SEND_BUFFER_MAX ((15 * 1024 * 1024) / PAGE_SIZE)
+
+static unsigned int send_buffer_size = SEND_BUFFER_MAX;
+module_param(send_buffer_size, uint, 0444);
+MODULE_PARM_DESC(send_buffer_size, "Send buffer size (# of pages)");
+
+/* List of resources to be mapped to user space */
 enum hv_uio_map {
 	TXRX_RING_MAP = 0,
 	INT_PAGE_MAP,
 	MON_PAGE_MAP,
+	RECV_BUF_MAP,
+	SEND_BUF_MAP
 };
 
 struct hv_uio_private_data {
 	struct uio_info info;
 	struct hv_device *device;
+
+	void	*recv_buf;
+	u32	recv_gpadl;
+	char	recv_name[32];	/* "recv:4294967295" */
+
+	void	*send_buf;
+	u32	send_gpadl;
+	char	send_name[32];
 };
 
 /*
@@ -96,11 +115,37 @@ static void hv_uio_channel_cb(void *context)
 	uio_event_notify(&pdata->info);
 }
 
+
+static void
+hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
+{
+	if (pdata->send_gpadl) {
+		vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
+		pdata->send_gpadl = 0;
+	}
+
+	if (pdata->send_buf) {
+		vfree(pdata->send_buf);
+		pdata->send_buf = NULL;
+	}
+
+	if (pdata->recv_gpadl) {
+		vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
+		pdata->recv_gpadl = 0;
+	}
+
+	if (pdata->recv_buf) {
+		vfree(pdata->recv_buf);
+		pdata->recv_buf = NULL;
+	}
+}
+
 static int
 hv_uio_probe(struct hv_device *dev,
 	     const struct hv_vmbus_device_id *dev_id)
 {
 	struct hv_uio_private_data *pdata;
+	size_t buf_size;
 	int ret;
 
 	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
@@ -142,6 +187,51 @@ hv_uio_probe(struct hv_device *dev,
 	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
 	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;
 
+	if (recv_buffer_size) {
+		buf_size = recv_buffer_size * PAGE_SIZE;
+		pdata->recv_buf = vzalloc(buf_size);
+		if (pdata->recv_buf == NULL) {
+			ret = -ENOMEM;
+			goto fail_close;
+		}
+
+		ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
+					    buf_size, &pdata->recv_gpadl);
+		if (ret)
+			goto fail_close;
+
+		/* put GPADL (Guest Physical Address Descriptor List) handle in name */
+		snprintf(pdata->recv_name, sizeof(pdata->recv_name),
+			 "recv:%u", pdata->recv_gpadl);
+		pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
+		pdata->info.mem[RECV_BUF_MAP].addr
+			= (phys_addr_t)pdata->recv_buf;
+		pdata->info.mem[RECV_BUF_MAP].size = buf_size;
+		pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+	}
+
+	if (send_buffer_size) {
+		buf_size = send_buffer_size * PAGE_SIZE;
+		pdata->send_buf = vzalloc(buf_size);
+		if (pdata->send_buf == NULL) {
+			ret = -ENOMEM;
+			goto fail_close;
+		}
+
+		ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
+					    buf_size, &pdata->send_gpadl);
+		if (ret)
+			goto fail_close;
+
+		snprintf(pdata->send_name, sizeof(pdata->send_name),
+			 "send:%u", pdata->send_gpadl);
+		pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
+		pdata->info.mem[SEND_BUF_MAP].addr
+			= (phys_addr_t)pdata->send_buf;
+		pdata->info.mem[SEND_BUF_MAP].size = buf_size;
+		pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
+	}
+
 	pdata->info.priv = pdata;
 	pdata->device = dev;
 
@@ -156,6 +246,7 @@ hv_uio_probe(struct hv_device *dev,
 	return 0;
 
 fail_close:
+	hv_uio_cleanup(dev, pdata);
 	vmbus_close(dev->channel);
 fail:
 	kfree(pdata);
@@ -172,6 +263,7 @@ hv_uio_remove(struct hv_device *dev)
 		return 0;
 
 	uio_unregister_device(&pdata->info);
+	hv_uio_cleanup(dev, pdata);
 	hv_set_drvdata(dev, NULL);
 	vmbus_close(dev->channel);
 	kfree(pdata);
@@ -200,6 +292,18 @@ hyperv_module_init(void)
 			ring_size);
 	}
 
+	if (recv_buffer_size > RECV_BUFFER_MAX) {
+		recv_buffer_size = RECV_BUFFER_MAX;
+		pr_info("Decreased recv_buffer_size to %u (max allowed)\n",
+			recv_buffer_size);
+	}
+
+	if (send_buffer_size > SEND_BUFFER_MAX) {
+		send_buffer_size = SEND_BUFFER_MAX;
+		pr_info("Decreased send_buffer_size to %u (max allowed)\n",
+			send_buffer_size);
+	}
+
 	return vmbus_driver_register(&hv_uio_drv);
 }
 
-- 
2.15.1



More information about the devel mailing list