pull-request: vmbus 2017-01-13

Stephen Hemminger <stephen@networkplumber.org>
Fri Jan 13 19:45:21 UTC 2017


Here is an updated set of vmbus cleanups, revised based on recent changes. The series converts the interrupt-mask traversal to standard kernel bitops (fixing the send-interrupt word arithmetic on 64-bit), drops the now-unused kick_q, signal_policy and conditional ring-lock plumbing, constifies parameters where possible, fixes spelling errors, and removes the netvsc receive staging buffers and per-channel state. A small stand-alone sketch of the 64-bit send-interrupt change follows the diffstat.

The following changes since commit b0f2d7d546d37697d3f50753904f6f0c549b62bc:

  VME: Remove node entry from vme_driver (2017-01-11 09:21:41 +0100)

are available in the git repository at:

  git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/vmbus-next 

for you to fetch changes up to c9483ca4fe3b796ff9c1c2d6cc772756b8195fc5:

  vmbus: remove per channel state (2017-01-13 10:18:37 -0800)

----------------------------------------------------------------
Stephen Hemminger (14):
      vmbus: remove useless return's
      vmbus: constify parameters where possible
      vmbus: use kernel bitops for traversing interrupt mask
      vmbus: fix send interrupt for 64 bit
      vmbus: eliminate unnecessary wrapper functions
      vmbus: drop no longer used kick_q argument
      vmbus: remove no longer used signal_policy
      vmbus: remove conditional locking of vmbus_write
      vmbus: remove unused kickq argument to sendpacket
      vmbus: remove unnecessary initialization
      vmbus: remove unused VMBUS_SERVICE_ID
      vmbus: fix spelling errors
      netvsc: remove no longer needed receive staging buffers
      vmbus: remove per channel state

 drivers/hv/channel.c              |  55 +++++++-------------
 drivers/hv/channel_mgmt.c         |   1 -
 drivers/hv/connection.c           |  62 +++++++++--------------
 drivers/hv/hv_balloon.c           |   2 -
 drivers/hv/hv_fcopy.c             |   2 -
 drivers/hv/hv_kvp.c               |  12 ++---
 drivers/hv/hv_snapshot.c          |   2 -
 drivers/hv/hyperv_vmbus.h         |  37 +++++---------
 drivers/hv/ring_buffer.c          |  93 ++++++++++------------------------
 drivers/hv/vmbus_drv.c            |   6 +--
 drivers/net/hyperv/hyperv_net.h   |   5 --
 drivers/net/hyperv/netvsc.c       | 104 +++++---------------------------------
 drivers/net/hyperv/rndis_filter.c |  11 ----
 include/linux/hyperv.h            |  67 +++---------------------
 14 files changed, 109 insertions(+), 350 deletions(-)
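
For reviewers who want to see the effect of the bitops conversion in isolation, below is a minimal userspace sketch (not part of the series; BITS_PER_LONG, BIT_WORD and the page[] array only mirror or stand in for the kernel versions). It contrasts the old open-coded send_int_page arithmetic, which uses a 32-bit word index on an unsigned long pointer, with the vmbus_send_interrupt()-style addressing used throughout the series:

/*
 * Stand-alone illustration only -- not kernel code.  BITS_PER_LONG and
 * BIT_WORD mirror the kernel macros; page[] stands in for the shared
 * send_int_page.  Build with: gcc -Wall demo.c && ./a.out
 */
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static unsigned long page[4];		/* stand-in for send_int_page */

/* Old open-coded form: 32-bit word index applied to an unsigned long *. */
static void set_relid_old(unsigned int relid)
{
	unsigned long *addr = page + (relid >> 5);

	*addr |= 1UL << (relid & 31);
}

/* New form, as in the vmbus_send_interrupt() helper added by this series. */
static void set_relid_new(unsigned int relid)
{
	page[BIT_WORD(relid)] |= 1UL << (relid % BITS_PER_LONG);
}

int main(void)
{
	unsigned int relid = 33;

	memset(page, 0, sizeof(page));
	set_relid_old(relid);
	printf("old: page[0]=%#lx page[1]=%#lx\n", page[0], page[1]);

	memset(page, 0, sizeof(page));
	set_relid_new(relid);
	printf("new: page[0]=%#lx page[1]=%#lx\n", page[0], page[1]);

	return 0;
}

On a 64-bit build the new form keeps relid 33 in the first long (bit 33 of the page), while the old pointer arithmetic pushes it out to bit 65 of the second long; that word-size mismatch is what the "vmbus: fix send interrupt for 64 bit" patch is aimed at.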

diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index be34547cdb68..a3c2289af9c3 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -47,12 +47,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
 	 * For channels marked as in "low latency" mode
 	 * bypass the monitor page mechanism.
 	 */
-	if ((channel->offermsg.monitor_allocated) &&
-	    (!channel->low_latency)) {
-		/* Each u32 represents 32 channels */
-		sync_set_bit(channel->offermsg.child_relid & 31,
-			(unsigned long *) vmbus_connection.send_int_page +
-			(channel->offermsg.child_relid >> 5));
+	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
+		vmbus_send_interrupt(channel->offermsg.child_relid);
 
 		/* Get the child to parent monitor page */
 		monitorpage = vmbus_connection.monitor_pages[1];
@@ -337,7 +333,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 			 * Gpadl is u32 and we are using a pointer which could
 			 * be 64-bit
 			 * This is governed by the guest/host protocol and
-			 * so the hypervisor gurantees that this is ok.
+			 * so the hypervisor guarantees that this is ok.
 			 */
 			for (i = 0; i < pfncurr; i++)
 				gpadl_body->pfn[i] = slow_virt_to_phys(
@@ -384,7 +380,7 @@ static int create_gpadl_header(void *kbuffer, u32 size,
 }
 
 /*
- * vmbus_establish_gpadl - Estabish a GPADL for the specified buffer
+ * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
  *
  * @channel: a channel
  * @kbuffer: from kmalloc or vmalloc
@@ -547,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	/*
 	 * In case a device driver's probe() fails (e.g.,
 	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
-	 * rescinded later (e.g., we dynamically disble an Integrated Service
+	 * rescinded later (e.g., we dynamically disable an Integrated Service
 	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 	 * here we should skip most of the below cleanup work.
 	 */
@@ -647,15 +643,14 @@ void vmbus_close(struct vmbus_channel *channel)
 EXPORT_SYMBOL_GPL(vmbus_close);
 
 int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
-			   u32 bufferlen, u64 requestid,
-			   enum vmbus_packet_type type, u32 flags, bool kick_q)
+			 u32 bufferlen, u64 requestid,
+			 enum vmbus_packet_type type, u32 flags)
 {
 	struct vmpacket_descriptor desc;
 	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -674,9 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
-				   lock, kick_q);
-
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -699,7 +692,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
 			   enum vmbus_packet_type type, u32 flags)
 {
 	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
-				    type, flags, true);
+				    type, flags);
 }
 EXPORT_SYMBOL(vmbus_sendpacket);
 
@@ -711,11 +704,9 @@ EXPORT_SYMBOL(vmbus_sendpacket);
  * explicitly.
  */
 int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
-				     struct hv_page_buffer pagebuffers[],
-				     u32 pagecount, void *buffer, u32 bufferlen,
-				     u64 requestid,
-				     u32 flags,
-				     bool kick_q)
+				    struct hv_page_buffer pagebuffers[],
+				    u32 pagecount, void *buffer, u32 bufferlen,
+				    u64 requestid, u32 flags)
 {
 	int i;
 	struct vmbus_channel_packet_page_buffer desc;
@@ -724,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
 
-
 	/*
 	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
 	 * largest size we support
@@ -743,7 +732,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc.flags = flags;
-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
 	desc.length8 = (u16)(packetlen_aligned >> 3);
 	desc.transactionid = requestid;
 	desc.rangecount = pagecount;
@@ -761,8 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3,
-				   lock, kick_q);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -777,8 +765,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 {
 	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
 	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
-					       buffer, bufferlen, requestid,
-					       flags, true);
+					       buffer, bufferlen, requestid, flags);
 
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -797,7 +784,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -805,7 +791,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes grandularity */
+	desc->dataoffset8 = desc_size >> 3; /* in 8-bytes granularity */
 	desc->length8 = (u16)(packetlen_aligned >> 3);
 	desc->transactionid = requestid;
 	desc->rangecount = 1;
@@ -817,8 +803,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3,
-				   lock, true);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -836,7 +821,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -857,7 +841,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	/* Setup the descriptor */
 	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
 	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
-	desc.dataoffset8 = descsize >> 3; /* in 8-bytes grandularity */
+	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
 	desc.length8 = (u16)(packetlen_aligned >> 3);
 	desc.transactionid = requestid;
 	desc.rangecount = 1;
@@ -875,8 +859,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3,
-				   lock, true);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0af7e39006c8..1e656097662f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -303,7 +303,6 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;
 
-	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);
 
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 9b72ebcd37bc..3ee5bc4f4d0b 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -382,17 +382,11 @@ static void process_chn_event(u32 relid)
  */
 void vmbus_on_event(unsigned long data)
 {
-	u32 dword;
-	u32 maxdword;
-	int bit;
-	u32 relid;
-	u32 *recv_int_page = NULL;
-	void *page_addr;
-	int cpu = smp_processor_id();
-	union hv_synic_event_flags *event;
+	unsigned long *recv_int_page;
+	u32 maxbits, relid;
 
 	if (vmbus_proto_version < VERSION_WIN8) {
-		maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+		maxbits = MAX_NUM_CHANNELS_SUPPORTED;
 		recv_int_page = vmbus_connection.recv_int_page;
 	} else {
 		/*
@@ -400,35 +394,29 @@ void vmbus_on_event(unsigned long data)
 		 * can be directly checked to get the id of the channel
 		 * that has the interrupt pending.
 		 */
-		maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
-		page_addr = hv_context.synic_event_page[cpu];
-		event = (union hv_synic_event_flags *)page_addr +
+		int cpu = smp_processor_id();
+		void *page_addr = hv_context.synic_event_page[cpu];
+		union hv_synic_event_flags *event
+			= (union hv_synic_event_flags *)page_addr +
 						 VMBUS_MESSAGE_SINT;
-		recv_int_page = event->flags32;
+
+		maxbits = HV_EVENT_FLAGS_COUNT;
+		recv_int_page = event->flags;
 	}
 
+	if (unlikely(!recv_int_page))
+		return;
 
+	for_each_set_bit(relid, recv_int_page, maxbits) {
+		unsigned int bit = relid % BITS_PER_LONG;
+		unsigned int dword = BIT_WORD(relid);
 
-	/* Check events */
-	if (!recv_int_page)
-		return;
-	for (dword = 0; dword < maxdword; dword++) {
-		if (!recv_int_page[dword])
-			continue;
-		for (bit = 0; bit < 32; bit++) {
-			if (sync_test_and_clear_bit(bit,
-				(unsigned long *)&recv_int_page[dword])) {
-				relid = (dword << 5) + bit;
-
-				if (relid == 0)
-					/*
-					 * Special case - vmbus
-					 * channel protocol msg
-					 */
-					continue;
-
-				process_chn_event(relid);
-			}
+		if (sync_test_and_clear_bit(bit, recv_int_page + dword)) {
+			/* Special case - vmbus channel protocol msg */
+			if (relid == 0)
+				continue;
+
+			process_chn_event(relid);
 		}
 	}
 }
@@ -494,12 +482,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
 {
 	u32 child_relid = channel->offermsg.child_relid;
 
-	if (!channel->is_dedicated_interrupt) {
-		/* Each u32 represents 32 channels */
-		sync_set_bit(child_relid & 31,
-			(unsigned long *)vmbus_connection.send_int_page +
-			(child_relid >> 5));
-	}
+	if (!channel->is_dedicated_interrupt)
+		vmbus_send_interrupt(child_relid);
 
 	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
 }
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 14c3dc4bd23c..2f4484dc772e 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -721,8 +721,6 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
 						    5*HZ);
 		post_status(&dm_device);
 	}
-
-	return;
 }
 
 static void hv_online_page(struct page *pg)
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index e47d8c9db03a..cb91053b852d 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -177,8 +177,6 @@ static void fcopy_send_data(struct work_struct *dummy)
 		}
 	}
 	kfree(smsg_out);
-
-	return;
 }
 
 /*
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 3abfc5983c97..5a60febe5301 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -56,7 +56,7 @@
  *
  * While the request/response protocol is guaranteed by the host, we further
  * ensure this by serializing packet processing in this driver - we do not
- * read additional packets from the VMBUs until the current packet is fully
+ * read additional packets from the VMBUS until the current packet is fully
  * handled.
  */
 
@@ -385,7 +385,7 @@ kvp_send_key(struct work_struct *dummy)
 	 * the max lengths specified. We will however, reserve room
 	 * for the string terminating character - in the utf16s_utf8s()
 	 * function we limit the size of the buffer where the converted
-	 * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to gaurantee
+	 * string is placed to HV_KVP_EXCHANGE_MAX_*_SIZE -1 to guarantee
 	 * that the strings can be properly terminated!
 	 */
 
@@ -471,8 +471,6 @@ kvp_send_key(struct work_struct *dummy)
 	}
 
 	kfree(message);
-
-	return;
 }
 
 /*
@@ -521,7 +519,7 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
 	 */
 	if (error) {
 		/*
-		 * Something failed or we have timedout;
+		 * Something failed or we have timed out;
 		 * terminate the current host-side iteration.
 		 */
 		goto response_done;
@@ -595,8 +593,8 @@ kvp_respond_to_host(struct hv_kvp_msg *msg_to_host, int error)
  * This callback is invoked when we get a KVP message from the host.
  * The host ensures that only one KVP transaction can be active at a time.
  * KVP implementation in Linux needs to forward the key to a user-mde
- * component to retrive the corresponding value. Consequently, we cannot
- * respond to the host in the conext of this callback. Since the host
+ * component to retrieve the corresponding value. Consequently, we cannot
+ * respond to the host in the context of this callback. Since the host
  * guarantees that at most only one transaction can be active at a time,
  * we stash away the transaction state in a set of global variables.
  */
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 4e543dbb731a..cf88dbfd7f32 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -203,8 +203,6 @@ static void vss_send_op(void)
 	}
 
 	kfree(vss_msg);
-
-	return;
 }
 
 static void vss_handle_request(struct work_struct *dummy)
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 83beea748c6f..5d54ac8d9146 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -77,8 +77,7 @@ enum hv_cpuid_function {
 
 /* Define synthetic interrupt controller flag constants. */
 #define HV_EVENT_FLAGS_COUNT		(256 * 8)
-#define HV_EVENT_FLAGS_BYTE_COUNT	(256)
-#define HV_EVENT_FLAGS_DWORD_COUNT	(256 / sizeof(u32))
+#define HV_EVENT_FLAGS_LONG_COUNT	(256 / sizeof(unsigned long))
 
 /* Define invalid partition identifier. */
 #define HV_PARTITION_ID_INVALID		((u64)0x0)
@@ -151,8 +150,7 @@ union hv_timer_config {
 
 /* Define the synthetic interrupt controller event flags format. */
 union hv_synic_event_flags {
-	u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
-	u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
+	unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
 };
 
 /* Define the synthetic interrupt flags page layout. */
@@ -408,21 +406,6 @@ static inline  __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version,
 #define HV_SERVICE_PROTOCOL_VERSION (0x0010)
 #define HV_CONNECT_PAYLOAD_BYTE_COUNT 64
 
-/* #define VMBUS_REVISION_NUMBER	6 */
-
-/* Our local vmbus's port and connection id. Anything >0 is fine */
-/* #define VMBUS_PORT_ID		11 */
-
-/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */
-static const uuid_le VMBUS_SERVICE_ID = {
-	.b = {
-		0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c,
-		0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4
-	},
-};
-
-
-
 struct hv_context {
 	/* We only support running on top of Hyper-V
 	* So at this point this really can only contain the Hyper-V ID
@@ -528,16 +511,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-		    struct kvec *kv_list,
-		    u32 kv_count, bool lock,
-		    bool kick_q);
+			struct kvec *kv_list, u32 kv_count);
 
 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
 		       u64 *requestid, bool raw);
 
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info);
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				 struct hv_ring_buffer_debug_info *debug_info);
 
 void hv_begin_read(struct hv_ring_buffer_info *rbi);
 
@@ -578,7 +559,7 @@ struct vmbus_connection {
 	 * recvInterruptPage to see which bit is set
 	 */
 	void *int_page;
-	void *send_int_page;
+	unsigned long *send_int_page;
 	void *recv_int_page;
 
 	/*
@@ -608,6 +589,12 @@ struct vmbus_msginfo {
 
 extern struct vmbus_connection vmbus_connection;
 
+static inline void vmbus_send_interrupt(u32 relid)
+{
+	sync_set_bit(relid % BITS_PER_LONG,
+		     vmbus_connection.send_int_page + BIT_WORD(relid));
+}
+
 enum vmbus_message_handler_type {
 	/* The related handler can sleep. */
 	VMHT_BLOCKING = 0,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2cd402986858..31b279919253 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -77,8 +77,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
  * host logic is fixed.
  */
 
-static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
-			       bool kick_q)
+static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
 {
 	struct hv_ring_buffer_info *rbi = &channel->outbound;
 
@@ -94,34 +93,6 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
 	 */
 	if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
 		vmbus_setevent(channel);
-
-	return;
-}
-
-/* Get the next write location for the specified ring buffer. */
-static inline u32
-hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
-{
-	u32 next = ring_info->ring_buffer->write_index;
-
-	return next;
-}
-
-/* Set the next write location for the specified ring buffer. */
-static inline void
-hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
-		     u32 next_write_location)
-{
-	ring_info->ring_buffer->write_index = next_write_location;
-}
-
-/* Get the next read location for the specified ring buffer. */
-static inline u32
-hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
-{
-	u32 next = ring_info->ring_buffer->read_index;
-
-	return next;
 }
 
 /*
@@ -129,8 +100,8 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
  * This allows the caller to skip.
  */
 static inline u32
-hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
-				 u32 offset)
+hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
+				    u32 offset)
 {
 	u32 next = ring_info->ring_buffer->read_index;
 
@@ -151,7 +122,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
 
 /* Get the size of the ring buffer. */
 static inline u32
-hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
 {
 	return ring_info->ring_datasize;
 }
@@ -168,13 +139,13 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
  * Assume there is enough room. Handles wrap-around in src case only!!
  */
 static u32 hv_copyfrom_ringbuffer(
-	struct hv_ring_buffer_info	*ring_info,
+	const struct hv_ring_buffer_info *ring_info,
 	void				*dest,
 	u32				destlen,
 	u32				start_read_offset)
 {
 	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
+	u32 ring_buffer_size = ring_info->ring_datasize;
 
 	memcpy(dest, ring_buffer + start_read_offset, destlen);
 
@@ -192,11 +163,11 @@ static u32 hv_copyfrom_ringbuffer(
 static u32 hv_copyto_ringbuffer(
 	struct hv_ring_buffer_info	*ring_info,
 	u32				start_write_offset,
-	void				*src,
+	const void			*src,
 	u32				srclen)
 {
 	void *ring_buffer = hv_get_ring_buffer(ring_info);
-	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
+	u32 ring_buffer_size = ring_info->ring_datasize;
 
 	memcpy(ring_buffer + start_write_offset, src, srclen);
 
@@ -207,8 +178,8 @@ static u32 hv_copyto_ringbuffer(
 }
 
 /* Get various debug metrics for the specified ring buffer. */
-void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
-			    struct hv_ring_buffer_debug_info *debug_info)
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+				 struct hv_ring_buffer_debug_info *debug_info)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
@@ -285,17 +256,15 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-		    struct kvec *kv_list, u32 kv_count, bool lock,
-		    bool kick_q)
+			struct kvec *kv_list, u32 kv_count)
 {
-	int i = 0;
+	int i;
 	u32 bytes_avail_towrite;
-	u32 totalbytes_towrite = 0;
-
+	u32 totalbytes_towrite = sizeof(u64);
 	u32 next_write_location;
 	u32 old_write;
-	u64 prev_indices = 0;
-	unsigned long flags = 0;
+	u64 prev_indices;
+	unsigned long flags;
 	struct hv_ring_buffer_info *outring_info = &channel->outbound;
 
 	if (channel->rescind)
@@ -304,10 +273,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;
 
-	totalbytes_towrite += sizeof(u64);
-
-	if (lock)
-		spin_lock_irqsave(&outring_info->ring_lock, flags);
+	spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
@@ -317,15 +283,14 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		if (lock)
-			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
 
 	/* Write to the ring buffer */
-	next_write_location = hv_get_next_write_location(outring_info);
+	next_write_location = outring_info->ring_buffer->write_index;
 
-	old_write = next_write_location;
+	old_write = outring_info->ring_buffer->write_index;
 
 	for (i = 0; i < kv_count; i++) {
 		next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -335,8 +300,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	}
 
 	/* Set previous packet start */
-	prev_indices = hv_get_ring_bufferindices(outring_info);
-
+	prev_indices = (u64)outring_info->ring_buffer->write_index << 32;
 	next_write_location = hv_copyto_ringbuffer(outring_info,
 					     next_write_location,
 					     &prev_indices,
@@ -346,13 +310,11 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	virt_mb();
 
 	/* Now, update the write location */
-	hv_set_next_write_location(outring_info, next_write_location);
+	outring_info->ring_buffer->write_index = next_write_location;
 
+	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
-	if (lock)
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
-
-	hv_signal_on_write(old_write, channel, kick_q);
+	hv_signal_on_write(old_write, channel);
 
 	if (channel->rescind)
 		return -ENODEV;
@@ -365,7 +327,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       u64 *requestid, bool raw)
 {
 	u32 bytes_avail_toread;
-	u32 next_read_location = 0;
+	u32 next_read_location;
 	u64 prev_indices = 0;
 	struct vmpacket_descriptor desc;
 	u32 offset;
@@ -390,10 +352,9 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
 		return ret;
 	}
 
-	next_read_location = hv_get_next_read_location(inring_info);
-	next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
-						    sizeof(desc),
-						    next_read_location);
+	next_read_location = hv_copyfrom_ringbuffer(inring_info,
+				    &desc, sizeof(desc),
+				    inring_info->ring_buffer->read_index);
 
 	offset = raw ? 0 : (desc.offset8 << 3);
 	packetlen = (desc.len8 << 3) - offset;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 565bdd16134a..96e291c2bc53 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -810,8 +810,6 @@ static void vmbus_shutdown(struct device *child_device)
 
 	if (drv->shutdown)
 		drv->shutdown(dev);
-
-	return;
 }
 
 
@@ -933,10 +931,8 @@ static void vmbus_isr(void)
 		(vmbus_proto_version == VERSION_WIN7)) {
 
 		/* Since we are a child, we only need to check bit 0 */
-		if (sync_test_and_clear_bit(0,
-			(unsigned long *) &event->flags32[0])) {
+		if (sync_test_and_clear_bit(0, event->flags))
 			handled = true;
-		}
 	} else {
 		/*
 		 * Our host is win8 or above. The signaling mechanism
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 3958adade7eb..cce70ceba6d5 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -748,11 +748,6 @@ struct netvsc_device {
 
 	int ring_size;
 
-	/* The primary channel callback buffer */
-	unsigned char *cb_buffer;
-	/* The sub channel callback buffer */
-	unsigned char *sub_cb_buf;
-
 	struct multi_send_data msd[VRSS_CHANNEL_MAX];
 	u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
 	u32 pkt_align; /* alignment bytes, e.g. 8 */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..7487498b663c 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -67,12 +67,6 @@ static struct netvsc_device *alloc_net_device(void)
 	if (!net_device)
 		return NULL;
 
-	net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
-	if (!net_device->cb_buffer) {
-		kfree(net_device);
-		return NULL;
-	}
-
 	net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
 					 sizeof(struct recv_comp_data));
 
@@ -93,7 +87,6 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
 	for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 		vfree(nvdev->mrc[i].buf);
 
-	kfree(nvdev->cb_buffer);
 	kfree(nvdev);
 }
 
@@ -584,7 +577,6 @@ void netvsc_device_remove(struct hv_device *device)
 	vmbus_close(device->channel);
 
 	/* Release all resources */
-	vfree(net_device->sub_cb_buf);
 	free_netvsc_device(net_device);
 }
 
@@ -723,8 +715,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 	char *dest = start + (section_index * net_device->send_section_size)
 		     + pend_size;
 	int i;
-	bool is_data_pkt = (skb != NULL) ? true : false;
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 	u32 msg_size = 0;
 	u32 padding = 0;
 	u32 remain = packet->total_data_buflen % net_device->pkt_align;
@@ -732,7 +722,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
 		packet->page_buf_cnt;
 
 	/* Add padding */
-	if (is_data_pkt && xmit_more && remain &&
+	if (skb && skb->xmit_more && remain &&
 	    !packet->cp_partial) {
 		padding = net_device->pkt_align - remain;
 		rndis_msg->msg_len += padding;
@@ -772,7 +762,6 @@ static inline int netvsc_send_pkt(
 	int ret;
 	struct hv_page_buffer *pgbuf;
 	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
-	bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (skb != NULL) {
@@ -796,16 +785,6 @@ static inline int netvsc_send_pkt(
 	if (out_channel->rescind)
 		return -ENODEV;
 
-	/*
-	 * It is possible that once we successfully place this packet
-	 * on the ringbuffer, we may stop the queue. In that case, we want
-	 * to notify the host independent of the xmit_more flag. We don't
-	 * need to be precise here; in the worst case we may signal the host
-	 * unnecessarily.
-	 */
-	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
-		xmit_more = false;
-
 	if (packet->page_buf_cnt) {
 		pgbuf = packet->cp_partial ? (*pb) +
 			packet->rmsg_pgcnt : (*pb);
@@ -815,15 +794,13 @@ static inline int netvsc_send_pkt(
 						      &nvmsg,
 						      sizeof(struct nvsp_message),
 						      req_id,
-						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
-						      !xmit_more);
+						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	} else {
 		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
 					   sizeof(struct nvsp_message),
 					   req_id,
 					   VM_PKT_DATA_INBAND,
-					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
-					   !xmit_more);
+					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 	}
 
 	if (ret == 0) {
@@ -1271,16 +1248,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
 
 void netvsc_channel_cb(void *context)
 {
-	int ret;
-	struct vmbus_channel *channel = (struct vmbus_channel *)context;
+	struct vmbus_channel *channel = context;
 	u16 q_idx = channel->offermsg.offer.sub_channel_index;
 	struct hv_device *device;
 	struct netvsc_device *net_device;
-	u32 bytes_recvd;
-	u64 request_id;
 	struct vmpacket_descriptor *desc;
-	unsigned char *buffer;
-	int bufferlen = NETVSC_PACKET_SIZE;
 	struct net_device *ndev;
 	bool need_to_commit = false;
 
@@ -1292,65 +1264,19 @@ void netvsc_channel_cb(void *context)
 	net_device = get_inbound_net_device(device);
 	if (!net_device)
 		return;
+
 	ndev = hv_get_drvdata(device);
-	buffer = get_per_channel_state(channel);
-
-	do {
-		desc = get_next_pkt_raw(channel);
-		if (desc != NULL) {
-			netvsc_process_raw_pkt(device,
-					       channel,
-					       net_device,
-					       ndev,
-					       desc->trans_id,
-					       desc);
-
-			put_pkt_raw(channel, desc);
-			need_to_commit = true;
-			continue;
-		}
-		if (need_to_commit) {
-			need_to_commit = false;
-			commit_rd_index(channel);
-		}
 
-		ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
-					   &bytes_recvd, &request_id);
-		if (ret == 0) {
-			if (bytes_recvd > 0) {
-				desc = (struct vmpacket_descriptor *)buffer;
-				netvsc_process_raw_pkt(device,
-						       channel,
-						       net_device,
-						       ndev,
-						       request_id,
-						       desc);
-			} else {
-				/*
-				 * We are done for this pass.
-				 */
-				break;
-			}
-
-		} else if (ret == -ENOBUFS) {
-			if (bufferlen > NETVSC_PACKET_SIZE)
-				kfree(buffer);
-			/* Handle large packet */
-			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-			if (buffer == NULL) {
-				/* Try again next time around */
-				netdev_err(ndev,
-					   "unable to allocate buffer of size "
-					   "(%d)!!\n", bytes_recvd);
-				break;
-			}
-
-			bufferlen = bytes_recvd;
-		}
-	} while (1);
+	while ((desc = get_next_pkt_raw(channel)) != NULL) {
+		netvsc_process_raw_pkt(device, channel, net_device,
+				       ndev, desc->trans_id, desc);
+
+		put_pkt_raw(channel, desc);
+		need_to_commit = true;
+	}
 
-	if (bufferlen > NETVSC_PACKET_SIZE)
-		kfree(buffer);
+	if (need_to_commit)
+		commit_rd_index(channel);
 
 	netvsc_chk_recv_comp(net_device, channel, q_idx);
 }
@@ -1374,8 +1300,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
 
 	net_device->ring_size = ring_size;
 
-	set_per_channel_state(device->channel, net_device->cb_buffer);
-
 	/* Open the channel */
 	ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
 			 ring_size * PAGE_SIZE, NULL, 0,
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8d90904e0e49..113c7f4d1590 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -948,9 +948,6 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
 	if (chn_index >= nvscdev->num_chn)
 		return;
 
-	set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
-			      NETVSC_PACKET_SIZE);
-
 	nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
 					      sizeof(struct recv_comp_data));
 
@@ -1099,14 +1096,6 @@ int rndis_filter_device_add(struct hv_device *dev,
 	if (net_device->num_chn == 1)
 		goto out;
 
-	net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
-					 NETVSC_PACKET_SIZE);
-	if (!net_device->sub_cb_buf) {
-		net_device->num_chn = 1;
-		dev_info(&dev->device, "No memory for subchannels.\n");
-		goto out;
-	}
-
 	vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
 
 	init_packet = &net_device->channel_init_pkt;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 7ea20bd7cdd1..3535913e15f6 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -138,8 +138,8 @@ struct hv_ring_buffer_info {
  * for the specified ring buffer
  */
 static inline void
-hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
-			  u32 *read, u32 *write)
+hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi,
+			     u32 *read, u32 *write)
 {
 	u32 read_loc, write_loc, dsize;
 
@@ -153,7 +153,7 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
 	*read = dsize - *write;
 }
 
-static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi)
 {
 	u32 read_loc, write_loc, dsize, read;
 
@@ -167,7 +167,7 @@ static inline u32 hv_get_bytes_to_read(struct hv_ring_buffer_info *rbi)
 	return read;
 }
 
-static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
+static inline u32 hv_get_bytes_to_write(const struct hv_ring_buffer_info *rbi)
 {
 	u32 read_loc, write_loc, dsize, write;
 
@@ -670,11 +670,6 @@ struct hv_input_signal_event_buffer {
 	struct hv_input_signal_event event;
 };
 
-enum hv_signal_policy {
-	HV_SIGNAL_POLICY_DEFAULT = 0,
-	HV_SIGNAL_POLICY_EXPLICIT,
-};
-
 enum hv_numa_policy {
 	HV_BALANCED = 0,
 	HV_LOCALIZED,
@@ -828,32 +823,11 @@ struct vmbus_channel {
 	 */
 	struct vmbus_channel *primary_channel;
 	/*
-	 * Support per-channel state for use by vmbus drivers.
-	 */
-	void *per_channel_state;
-	/*
 	 * To support per-cpu lookup mapping of relid to channel,
 	 * link up channels based on their CPU affinity.
 	 */
 	struct list_head percpu_list;
 	/*
-	 * Host signaling policy: The default policy will be
-	 * based on the ring buffer state. We will also support
-	 * a policy where the client driver can have explicit
-	 * signaling control.
-	 */
-	enum hv_signal_policy  signal_policy;
-	/*
-	 * On the channel send side, many of the VMBUS
-	 * device drivers explicity serialize access to the
-	 * outgoing ring buffer. Give more control to the
-	 * VMBUS device drivers in terms how to serialize
-	 * accesss to the outgoing ring buffer.
-	 * The default behavior will be to aquire the
-	 * ring lock to preserve the current behavior.
-	 */
-	bool acquire_ring_lock;
-	/*
 	 * For performance critical channels (storage, networking
 	 * etc,), Hyper-V has a mechanism to enhance the throughput
 	 * at the expense of latency:
@@ -893,23 +867,12 @@ struct vmbus_channel {
 
 };
 
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-	c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
 	return !!(c->offermsg.offer.chn_flags &
 		  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
 }
 
-static inline void set_channel_signal_state(struct vmbus_channel *c,
-					    enum hv_signal_policy policy)
-{
-	c->signal_policy = policy;
-}
-
 static inline void set_channel_affinity_state(struct vmbus_channel *c,
 					      enum hv_numa_policy policy)
 {
@@ -921,16 +884,6 @@ static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
 	c->batched_reading = state;
 }
 
-static inline void set_per_channel_state(struct vmbus_channel *c, void *s)
-{
-	c->per_channel_state = s;
-}
-
-static inline void *get_per_channel_state(struct vmbus_channel *c)
-{
-	return c->per_channel_state;
-}
-
 static inline void set_channel_pending_send_size(struct vmbus_channel *c,
 						 u32 size)
 {
@@ -1041,8 +994,7 @@ extern int vmbus_sendpacket_ctl(struct vmbus_channel *channel,
 				  u32 bufferLen,
 				  u64 requestid,
 				  enum vmbus_packet_type type,
-				  u32 flags,
-				  bool kick_q);
+				  u32 flags);
 
 extern int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 					    struct hv_page_buffer pagebuffers[],
@@ -1057,8 +1009,7 @@ extern int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 					   void *buffer,
 					   u32 bufferlen,
 					   u64 requestid,
-					   u32 flags,
-					   bool kick_q);
+					   u32 flags);
 
 extern int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 					struct hv_multipage_buffer *mpb,
@@ -1467,9 +1418,9 @@ void vmbus_set_event(struct vmbus_channel *channel);
 
 /* Get the start of the ring buffer. */
 static inline void *
-hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
+hv_get_ring_buffer(const struct hv_ring_buffer_info *ring_info)
 {
-	return (void *)ring_info->ring_buffer->buffer;
+	return ring_info->ring_buffer->buffer;
 }
 
 /*
@@ -1515,8 +1466,6 @@ static inline  void hv_signal_on_read(struct vmbus_channel *channel)
 
 	if (cur_write_sz >= pending_sz)
 		vmbus_setevent(channel);
-
-	return;
 }
 
 /*

