Patch 'staging: ccg: include all sourced files' applied to my tree

balbi at ti.com balbi at ti.com
Mon Sep 10 12:47:59 UTC 2012


Hi,

This is a note to let you know that the patch:

	staging: ccg: include all sourced files

has been applied to my tree and can be found at:

	http://bit.ly/QeJOo4

If you have any concerns, please let me know by replying to this mail.

From e220ff75db3c1195814c2ad5ada11f71b011d000 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Date: Thu, 6 Sep 2012 20:11:06 +0200
Subject: [PATCH] staging: ccg: include all sourced files

This Android gadget includes a bunch of .c files. Fixing the normal
gadgets is not the real problem, but this gadget is not always fixable
since the problems here are fundamental / design issues.

*I* wanted to get this removed, but other people want to keep it even
though there were reports that Android itself is not using it. Some
people think that it is better to have this instead of nothing, and
others argue that they need sdb and the mass storage gadget. The sdb
function is not provided by ccg, so I don't see the point of this. I
don't see any logical reasoning behind it and I decided that it is time
to retreat.

This patch brings all dependencies of ccg into staging so I can do
whatever I want in drivers/usb/gadget without breaking ccg.

Cc: devel at driverdev.osuosl.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy at linutronix.de>
Signed-off-by: Felipe Balbi <balbi at ti.com>
---
 drivers/staging/ccg/Makefile         |    2 -
 drivers/staging/ccg/ccg.c            |   26 +-
 drivers/staging/ccg/composite.c      | 1687 ++++++++++++++++++
 drivers/staging/ccg/composite.h      |  395 +++++
 drivers/staging/ccg/config.c         |  158 ++
 drivers/staging/ccg/epautoconf.c     |  393 +++++
 drivers/staging/ccg/f_acm.c          |  814 +++++++++
 drivers/staging/ccg/f_fs.c           | 2455 ++++++++++++++++++++++++++
 drivers/staging/ccg/f_mass_storage.c | 3135 ++++++++++++++++++++++++++++++++++
 drivers/staging/ccg/f_rndis.c        |  918 ++++++++++
 drivers/staging/ccg/gadget_chips.h   |  150 ++
 drivers/staging/ccg/ndis.h           |   47 +
 drivers/staging/ccg/rndis.c          | 1175 +++++++++++++
 drivers/staging/ccg/rndis.h          |  222 +++
 drivers/staging/ccg/storage_common.c |  893 ++++++++++
 drivers/staging/ccg/u_ether.c        |  986 +++++++++++
 drivers/staging/ccg/u_ether.h        |  154 ++
 drivers/staging/ccg/u_serial.c       | 1341 +++++++++++++++
 drivers/staging/ccg/u_serial.h       |   65 +
 drivers/staging/ccg/usbstring.c      |   71 +
 20 files changed, 15072 insertions(+), 15 deletions(-)
 create mode 100644 drivers/staging/ccg/composite.c
 create mode 100644 drivers/staging/ccg/composite.h
 create mode 100644 drivers/staging/ccg/config.c
 create mode 100644 drivers/staging/ccg/epautoconf.c
 create mode 100644 drivers/staging/ccg/f_acm.c
 create mode 100644 drivers/staging/ccg/f_fs.c
 create mode 100644 drivers/staging/ccg/f_mass_storage.c
 create mode 100644 drivers/staging/ccg/f_rndis.c
 create mode 100644 drivers/staging/ccg/gadget_chips.h
 create mode 100644 drivers/staging/ccg/ndis.h
 create mode 100644 drivers/staging/ccg/rndis.c
 create mode 100644 drivers/staging/ccg/rndis.h
 create mode 100644 drivers/staging/ccg/storage_common.c
 create mode 100644 drivers/staging/ccg/u_ether.c
 create mode 100644 drivers/staging/ccg/u_ether.h
 create mode 100644 drivers/staging/ccg/u_serial.c
 create mode 100644 drivers/staging/ccg/u_serial.h
 create mode 100644 drivers/staging/ccg/usbstring.c

diff --git a/drivers/staging/ccg/Makefile b/drivers/staging/ccg/Makefile
index 693da63..814fa9d 100644
--- a/drivers/staging/ccg/Makefile
+++ b/drivers/staging/ccg/Makefile
@@ -1,4 +1,2 @@
 g_ccg-y				:= ccg.o
-ccflags-y			+= -Idrivers/usb/gadget
-
 obj-$(CONFIG_USB_G_CCG)		+= g_ccg.o
diff --git a/drivers/staging/ccg/ccg.c b/drivers/staging/ccg/ccg.c
index eadda55..80feb95 100644
--- a/drivers/staging/ccg/ccg.c
+++ b/drivers/staging/ccg/ccg.c
@@ -32,7 +32,7 @@
 #include <linux/platform_device.h>
 
 #include <linux/usb/ch9.h>
-#include <linux/usb/composite.h>
+#include "composite.h"
 #include <linux/usb/gadget.h>
 
 #include "gadget_chips.h"
@@ -44,19 +44,19 @@
  * the runtime footprint, and giving us at least some parts of what
  * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
  */
-#include "../../usb/gadget/usbstring.c"
-#include "../../usb/gadget/config.c"
-#include "../../usb/gadget/epautoconf.c"
-#include "../../usb/gadget/composite.c"
-
-#include "../../usb/gadget/f_mass_storage.c"
-#include "../../usb/gadget/u_serial.c"
-#include "../../usb/gadget/f_acm.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+#include "composite.c"
+
+#include "f_mass_storage.c"
+#include "u_serial.c"
+#include "f_acm.c"
 #define USB_ETH_RNDIS y
-#include "../../usb/gadget/f_rndis.c"
-#include "../../usb/gadget/rndis.c"
-#include "../../usb/gadget/u_ether.c"
-#include "../../usb/gadget/f_fs.c"
+#include "f_rndis.c"
+#include "rndis.c"
+#include "u_ether.c"
+#include "f_fs.c"
 
 MODULE_AUTHOR("Mike Lockwood, Andrzej Pietrasiewicz");
 MODULE_DESCRIPTION("Configurable Composite USB Gadget");
diff --git a/drivers/staging/ccg/composite.c b/drivers/staging/ccg/composite.c
new file mode 100644
index 0000000..2a345f2
--- /dev/null
+++ b/drivers/staging/ccg/composite.c
@@ -0,0 +1,1687 @@
+/*
+ * composite.c - infrastructure for Composite USB Gadgets
+ *
+ * Copyright (C) 2006-2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/composite.h>
+#include <asm/unaligned.h>
+
+/*
+ * The code in this file is utility code, used to build a gadget driver
+ * from one or more "function" drivers, one or more "configuration"
+ * objects, and a "usb_composite_driver" by gluing them together along
+ * with the relevant device-wide data.
+ */
+
+/* big enough to hold our biggest descriptor */
+#define USB_BUFSIZ	1024
+
+static struct usb_composite_driver *composite;
+
+/* Some systems will need runtime overrides for the  product identifiers
+ * published in the device descriptor, either numbers or strings or both.
+ * String parameters are in UTF-8 (superset of ASCII's 7 bit characters).
+ */
+
+static ushort idVendor;
+module_param(idVendor, ushort, 0644);
+MODULE_PARM_DESC(idVendor, "USB Vendor ID");
+
+static ushort idProduct;
+module_param(idProduct, ushort, 0644);
+MODULE_PARM_DESC(idProduct, "USB Product ID");
+
+static ushort bcdDevice;
+module_param(bcdDevice, ushort, 0644);
+MODULE_PARM_DESC(bcdDevice, "USB Device version (BCD)");
+
+static char *iManufacturer;
+module_param(iManufacturer, charp, 0644);
+MODULE_PARM_DESC(iManufacturer, "USB Manufacturer string");
+
+static char *iProduct;
+module_param(iProduct, charp, 0644);
+MODULE_PARM_DESC(iProduct, "USB Product string");
+
+static char *iSerialNumber;
+module_param(iSerialNumber, charp, 0644);
+MODULE_PARM_DESC(iSerialNumber, "SerialNumber string");
+
+static char composite_manufacturer[50];
+
+/*-------------------------------------------------------------------------*/
+/**
+ * next_ep_desc() - advance to the next EP descriptor
+ * @t: current pointer within descriptor array
+ *
+ * Return: next EP descriptor or NULL
+ *
+ * Iterate over @t until either EP descriptor found or
+ * NULL (that indicates end of list) encountered
+ */
+static struct usb_descriptor_header**
+next_ep_desc(struct usb_descriptor_header **t)
+{
+	for (; *t; t++) {
+		if ((*t)->bDescriptorType == USB_DT_ENDPOINT)
+			return t;
+	}
+	return NULL;
+}
+
+/*
+ * for_each_ep_desc()- iterate over endpoint descriptors in the
+ *		descriptors list
+ * @start:	pointer within descriptor array.
+ * @ep_desc:	endpoint descriptor to use as the loop cursor
+ */
+#define for_each_ep_desc(start, ep_desc) \
+	for (ep_desc = next_ep_desc(start); \
+	      ep_desc; ep_desc = next_ep_desc(ep_desc+1))
+
+/**
+ * config_ep_by_speed() - configures the given endpoint
+ * according to gadget speed.
+ * @g: pointer to the gadget
+ * @f: usb function
+ * @_ep: the endpoint to configure
+ *
+ * Return: error code, 0 on success
+ *
+ * This function chooses the right descriptors for a given
+ * endpoint according to gadget speed and saves it in the
+ * endpoint desc field. If the endpoint already has a descriptor
+ * assigned to it - overwrites it with currently corresponding
+ * descriptor. The endpoint maxpacket field is updated according
+ * to the chosen descriptor.
+ * Note: the supplied function should hold all the descriptors
+ * for supported speeds
+ */
+int config_ep_by_speed(struct usb_gadget *g,
+			struct usb_function *f,
+			struct usb_ep *_ep)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(g);
+	struct usb_endpoint_descriptor *chosen_desc = NULL;
+	struct usb_descriptor_header **speed_desc = NULL;
+
+	struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+	int want_comp_desc = 0;
+
+	struct usb_descriptor_header **d_spd; /* cursor for speed desc */
+
+	if (!g || !f || !_ep)
+		return -EIO;
+
+	/* select desired speed */
+	switch (g->speed) {
+	case USB_SPEED_SUPER:
+		if (gadget_is_superspeed(g)) {
+			speed_desc = f->ss_descriptors;
+			want_comp_desc = 1;
+			break;
+		}
+		/* else: fall through */
+	case USB_SPEED_HIGH:
+		if (gadget_is_dualspeed(g)) {
+			speed_desc = f->hs_descriptors;
+			break;
+		}
+		/* else: fall through */
+	default:
+		speed_desc = f->descriptors;
+	}
+	/* find descriptors */
+	for_each_ep_desc(speed_desc, d_spd) {
+		chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
+		if (chosen_desc->bEndpointAddress == _ep->address)
+			goto ep_found;
+	}
+	return -EIO;
+
+ep_found:
+	/* commit results */
+	_ep->maxpacket = usb_endpoint_maxp(chosen_desc);
+	_ep->desc = chosen_desc;
+	_ep->comp_desc = NULL;
+	_ep->maxburst = 0;
+	_ep->mult = 0;
+	if (!want_comp_desc)
+		return 0;
+
+	/*
+	 * Companion descriptor should follow EP descriptor
+	 * USB 3.0 spec, #9.6.7
+	 */
+	comp_desc = (struct usb_ss_ep_comp_descriptor *)*(++d_spd);
+	if (!comp_desc ||
+	    (comp_desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP))
+		return -EIO;
+	_ep->comp_desc = comp_desc;
+	if (g->speed == USB_SPEED_SUPER) {
+		switch (usb_endpoint_type(_ep->desc)) {
+		case USB_ENDPOINT_XFER_ISOC:
+			/* mult: bits 1:0 of bmAttributes */
+			_ep->mult = comp_desc->bmAttributes & 0x3;
+		case USB_ENDPOINT_XFER_BULK:
+		case USB_ENDPOINT_XFER_INT:
+			_ep->maxburst = comp_desc->bMaxBurst + 1;
+			break;
+		default:
+			if (comp_desc->bMaxBurst != 0)
+				ERROR(cdev, "ep0 bMaxBurst must be 0\n");
+			_ep->maxburst = 1;
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * usb_add_function() - add a function to a configuration
+ * @config: the configuration
+ * @function: the function being added
+ * Context: single threaded during gadget setup
+ *
+ * After initialization, each configuration must have one or more
+ * functions added to it.  Adding a function involves calling its @bind()
+ * method to allocate resources such as interface and string identifiers
+ * and endpoints.
+ *
+ * This function returns the value of the function's bind(), which is
+ * zero for success else a negative errno value.
+ */
+int usb_add_function(struct usb_configuration *config,
+		struct usb_function *function)
+{
+	int	value = -EINVAL;
+
+	DBG(config->cdev, "adding '%s'/%p to config '%s'/%p\n",
+			function->name, function,
+			config->label, config);
+
+	if (!function->set_alt || !function->disable)
+		goto done;
+
+	function->config = config;
+	list_add_tail(&function->list, &config->functions);
+
+	/* REVISIT *require* function->bind? */
+	if (function->bind) {
+		value = function->bind(config, function);
+		if (value < 0) {
+			list_del(&function->list);
+			function->config = NULL;
+		}
+	} else
+		value = 0;
+
+	/* We allow configurations that don't work at both speeds.
+	 * If we run into a lowspeed Linux system, treat it the same
+	 * as full speed ... it's the function drivers that will need
+	 * to avoid bulk and ISO transfers.
+	 */
+	if (!config->fullspeed && function->descriptors)
+		config->fullspeed = true;
+	if (!config->highspeed && function->hs_descriptors)
+		config->highspeed = true;
+	if (!config->superspeed && function->ss_descriptors)
+		config->superspeed = true;
+
+done:
+	if (value)
+		DBG(config->cdev, "adding '%s'/%p --> %d\n",
+				function->name, function, value);
+	return value;
+}
+
+/**
+ * usb_function_deactivate - prevent function and gadget enumeration
+ * @function: the function that isn't yet ready to respond
+ *
+ * Blocks response of the gadget driver to host enumeration by
+ * preventing the data line pullup from being activated.  This is
+ * normally called during @bind() processing to change from the
+ * initial "ready to respond" state, or when a required resource
+ * becomes available.
+ *
+ * For example, drivers that serve as a passthrough to a userspace
+ * daemon can block enumeration unless that daemon (such as an OBEX,
+ * MTP, or print server) is ready to handle host requests.
+ *
+ * Not all systems support software control of their USB peripheral
+ * data pullups.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_deactivate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	unsigned long			flags;
+	int				status = 0;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->deactivations == 0)
+		status = usb_gadget_disconnect(cdev->gadget);
+	if (status == 0)
+		cdev->deactivations++;
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+	return status;
+}
+
+/**
+ * usb_function_activate - allow function and gadget enumeration
+ * @function: function on which usb_function_activate() was called
+ *
+ * Reverses effect of usb_function_deactivate().  If no more functions
+ * are delaying their activation, the gadget driver will respond to
+ * host enumeration procedures.
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_function_activate(struct usb_function *function)
+{
+	struct usb_composite_dev	*cdev = function->config->cdev;
+	unsigned long			flags;
+	int				status = 0;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (WARN_ON(cdev->deactivations == 0))
+		status = -EINVAL;
+	else {
+		cdev->deactivations--;
+		if (cdev->deactivations == 0)
+			status = usb_gadget_connect(cdev->gadget);
+	}
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+	return status;
+}
+
+/**
+ * usb_interface_id() - allocate an unused interface ID
+ * @config: configuration associated with the interface
+ * @function: function handling the interface
+ * Context: single threaded during gadget setup
+ *
+ * usb_interface_id() is called from usb_function.bind() callbacks to
+ * allocate new interface IDs.  The function driver will then store that
+ * ID in interface, association, CDC union, and other descriptors.  It
+ * will also handle any control requests targeted at that interface,
+ * particularly changing its altsetting via set_alt().  There may
+ * also be class-specific or vendor-specific requests to handle.
+ *
+ * All interface identifier should be allocated using this routine, to
+ * ensure that for example different functions don't wrongly assign
+ * different meanings to the same identifier.  Note that since interface
+ * identifiers are configuration-specific, functions used in more than
+ * one configuration (or more than once in a given configuration) need
+ * multiple versions of the relevant descriptors.
+ *
+ * Returns the interface ID which was allocated; or -ENODEV if no
+ * more interface IDs can be allocated.
+ */
+int usb_interface_id(struct usb_configuration *config,
+		struct usb_function *function)
+{
+	unsigned id = config->next_interface_id;
+
+	if (id < MAX_CONFIG_INTERFACES) {
+		config->interface[id] = function;
+		config->next_interface_id = id + 1;
+		return id;
+	}
+	return -ENODEV;
+}
+
+static int config_buf(struct usb_configuration *config,
+		enum usb_device_speed speed, void *buf, u8 type)
+{
+	struct usb_config_descriptor	*c = buf;
+	void				*next = buf + USB_DT_CONFIG_SIZE;
+	int				len = USB_BUFSIZ - USB_DT_CONFIG_SIZE;
+	struct usb_function		*f;
+	int				status;
+
+	/* write the config descriptor */
+	c = buf;
+	c->bLength = USB_DT_CONFIG_SIZE;
+	c->bDescriptorType = type;
+	/* wTotalLength is written later */
+	c->bNumInterfaces = config->next_interface_id;
+	c->bConfigurationValue = config->bConfigurationValue;
+	c->iConfiguration = config->iConfiguration;
+	c->bmAttributes = USB_CONFIG_ATT_ONE | config->bmAttributes;
+	c->bMaxPower = config->bMaxPower ? : (CONFIG_USB_GADGET_VBUS_DRAW / 2);
+
+	/* There may be e.g. OTG descriptors */
+	if (config->descriptors) {
+		status = usb_descriptor_fillbuf(next, len,
+				config->descriptors);
+		if (status < 0)
+			return status;
+		len -= status;
+		next += status;
+	}
+
+	/* add each function's descriptors */
+	list_for_each_entry(f, &config->functions, list) {
+		struct usb_descriptor_header **descriptors;
+
+		switch (speed) {
+		case USB_SPEED_SUPER:
+			descriptors = f->ss_descriptors;
+			break;
+		case USB_SPEED_HIGH:
+			descriptors = f->hs_descriptors;
+			break;
+		default:
+			descriptors = f->descriptors;
+		}
+
+		if (!descriptors)
+			continue;
+		status = usb_descriptor_fillbuf(next, len,
+			(const struct usb_descriptor_header **) descriptors);
+		if (status < 0)
+			return status;
+		len -= status;
+		next += status;
+	}
+
+	len = next - buf;
+	c->wTotalLength = cpu_to_le16(len);
+	return len;
+}
+
+static int config_desc(struct usb_composite_dev *cdev, unsigned w_value)
+{
+	struct usb_gadget		*gadget = cdev->gadget;
+	struct usb_configuration	*c;
+	u8				type = w_value >> 8;
+	enum usb_device_speed		speed = USB_SPEED_UNKNOWN;
+
+	if (gadget->speed == USB_SPEED_SUPER)
+		speed = gadget->speed;
+	else if (gadget_is_dualspeed(gadget)) {
+		int	hs = 0;
+		if (gadget->speed == USB_SPEED_HIGH)
+			hs = 1;
+		if (type == USB_DT_OTHER_SPEED_CONFIG)
+			hs = !hs;
+		if (hs)
+			speed = USB_SPEED_HIGH;
+
+	}
+
+	/* This is a lookup by config *INDEX* */
+	w_value &= 0xff;
+	list_for_each_entry(c, &cdev->configs, list) {
+		/* ignore configs that won't work at this speed */
+		switch (speed) {
+		case USB_SPEED_SUPER:
+			if (!c->superspeed)
+				continue;
+			break;
+		case USB_SPEED_HIGH:
+			if (!c->highspeed)
+				continue;
+			break;
+		default:
+			if (!c->fullspeed)
+				continue;
+		}
+
+		if (w_value == 0)
+			return config_buf(c, speed, cdev->req->buf, type);
+		w_value--;
+	}
+	return -EINVAL;
+}
+
+static int count_configs(struct usb_composite_dev *cdev, unsigned type)
+{
+	struct usb_gadget		*gadget = cdev->gadget;
+	struct usb_configuration	*c;
+	unsigned			count = 0;
+	int				hs = 0;
+	int				ss = 0;
+
+	if (gadget_is_dualspeed(gadget)) {
+		if (gadget->speed == USB_SPEED_HIGH)
+			hs = 1;
+		if (gadget->speed == USB_SPEED_SUPER)
+			ss = 1;
+		if (type == USB_DT_DEVICE_QUALIFIER)
+			hs = !hs;
+	}
+	list_for_each_entry(c, &cdev->configs, list) {
+		/* ignore configs that won't work at this speed */
+		if (ss) {
+			if (!c->superspeed)
+				continue;
+		} else if (hs) {
+			if (!c->highspeed)
+				continue;
+		} else {
+			if (!c->fullspeed)
+				continue;
+		}
+		count++;
+	}
+	return count;
+}
+
+/**
+ * bos_desc() - prepares the BOS descriptor.
+ * @cdev: pointer to usb_composite device to generate the bos
+ *	descriptor for
+ *
+ * This function generates the BOS (Binary Device Object)
+ * descriptor and its device capabilities descriptors. The BOS
+ * descriptor should be supported by a SuperSpeed device.
+ */
+static int bos_desc(struct usb_composite_dev *cdev)
+{
+	struct usb_ext_cap_descriptor	*usb_ext;
+	struct usb_ss_cap_descriptor	*ss_cap;
+	struct usb_dcd_config_params	dcd_config_params;
+	struct usb_bos_descriptor	*bos = cdev->req->buf;
+
+	bos->bLength = USB_DT_BOS_SIZE;
+	bos->bDescriptorType = USB_DT_BOS;
+
+	bos->wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE);
+	bos->bNumDeviceCaps = 0;
+
+	/*
+	 * A SuperSpeed device shall include the USB2.0 extension descriptor
+	 * and shall support LPM when operating in USB2.0 HS mode.
+	 */
+	usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+	bos->bNumDeviceCaps++;
+	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
+	usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+	usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+	usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+	usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT);
+
+	/*
+	 * The Superspeed USB Capability descriptor shall be implemented by all
+	 * SuperSpeed devices.
+	 */
+	ss_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+	bos->bNumDeviceCaps++;
+	le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SS_CAP_SIZE);
+	ss_cap->bLength = USB_DT_USB_SS_CAP_SIZE;
+	ss_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+	ss_cap->bDevCapabilityType = USB_SS_CAP_TYPE;
+	ss_cap->bmAttributes = 0; /* LTM is not supported yet */
+	ss_cap->wSpeedSupported = cpu_to_le16(USB_LOW_SPEED_OPERATION |
+				USB_FULL_SPEED_OPERATION |
+				USB_HIGH_SPEED_OPERATION |
+				USB_5GBPS_OPERATION);
+	ss_cap->bFunctionalitySupport = USB_LOW_SPEED_OPERATION;
+
+	/* Get Controller configuration */
+	if (cdev->gadget->ops->get_config_params)
+		cdev->gadget->ops->get_config_params(&dcd_config_params);
+	else {
+		dcd_config_params.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT;
+		dcd_config_params.bU2DevExitLat =
+			cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT);
+	}
+	ss_cap->bU1devExitLat = dcd_config_params.bU1devExitLat;
+	ss_cap->bU2DevExitLat = dcd_config_params.bU2DevExitLat;
+
+	return le16_to_cpu(bos->wTotalLength);
+}
+
+static void device_qual(struct usb_composite_dev *cdev)
+{
+	struct usb_qualifier_descriptor	*qual = cdev->req->buf;
+
+	qual->bLength = sizeof(*qual);
+	qual->bDescriptorType = USB_DT_DEVICE_QUALIFIER;
+	/* POLICY: same bcdUSB and device type info at both speeds */
+	qual->bcdUSB = cdev->desc.bcdUSB;
+	qual->bDeviceClass = cdev->desc.bDeviceClass;
+	qual->bDeviceSubClass = cdev->desc.bDeviceSubClass;
+	qual->bDeviceProtocol = cdev->desc.bDeviceProtocol;
+	/* ASSUME same EP0 fifo size at both speeds */
+	qual->bMaxPacketSize0 = cdev->gadget->ep0->maxpacket;
+	qual->bNumConfigurations = count_configs(cdev, USB_DT_DEVICE_QUALIFIER);
+	qual->bRESERVED = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void reset_config(struct usb_composite_dev *cdev)
+{
+	struct usb_function		*f;
+
+	DBG(cdev, "reset config\n");
+
+	list_for_each_entry(f, &cdev->config->functions, list) {
+		if (f->disable)
+			f->disable(f);
+
+		bitmap_zero(f->endpoints, 32);
+	}
+	cdev->config = NULL;
+}
+
+static int set_config(struct usb_composite_dev *cdev,
+		const struct usb_ctrlrequest *ctrl, unsigned number)
+{
+	struct usb_gadget	*gadget = cdev->gadget;
+	struct usb_configuration *c = NULL;
+	int			result = -EINVAL;
+	unsigned		power = gadget_is_otg(gadget) ? 8 : 100;
+	int			tmp;
+
+	if (number) {
+		list_for_each_entry(c, &cdev->configs, list) {
+			if (c->bConfigurationValue == number) {
+				/*
+				 * We disable the FDs of the previous
+				 * configuration only if the new configuration
+				 * is a valid one
+				 */
+				if (cdev->config)
+					reset_config(cdev);
+				result = 0;
+				break;
+			}
+		}
+		if (result < 0)
+			goto done;
+	} else { /* Zero configuration value - need to reset the config */
+		if (cdev->config)
+			reset_config(cdev);
+		result = 0;
+	}
+
+	INFO(cdev, "%s config #%d: %s\n",
+	     usb_speed_string(gadget->speed),
+	     number, c ? c->label : "unconfigured");
+
+	if (!c)
+		goto done;
+
+	cdev->config = c;
+
+	/* Initialize all interfaces by setting them to altsetting zero. */
+	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
+		struct usb_function	*f = c->interface[tmp];
+		struct usb_descriptor_header **descriptors;
+
+		if (!f)
+			break;
+
+		/*
+		 * Record which endpoints are used by the function. This is used
+		 * to dispatch control requests targeted at that endpoint to the
+		 * function's setup callback instead of the current
+		 * configuration's setup callback.
+		 */
+		switch (gadget->speed) {
+		case USB_SPEED_SUPER:
+			descriptors = f->ss_descriptors;
+			break;
+		case USB_SPEED_HIGH:
+			descriptors = f->hs_descriptors;
+			break;
+		default:
+			descriptors = f->descriptors;
+		}
+
+		for (; *descriptors; ++descriptors) {
+			struct usb_endpoint_descriptor *ep;
+			int addr;
+
+			if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
+				continue;
+
+			ep = (struct usb_endpoint_descriptor *)*descriptors;
+			addr = ((ep->bEndpointAddress & 0x80) >> 3)
+			     |  (ep->bEndpointAddress & 0x0f);
+			set_bit(addr, f->endpoints);
+		}
+
+		result = f->set_alt(f, tmp, 0);
+		if (result < 0) {
+			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
+					tmp, f->name, f, result);
+
+			reset_config(cdev);
+			goto done;
+		}
+
+		if (result == USB_GADGET_DELAYED_STATUS) {
+			DBG(cdev,
+			 "%s: interface %d (%s) requested delayed status\n",
+					__func__, tmp, f->name);
+			cdev->delayed_status++;
+			DBG(cdev, "delayed_status count %d\n",
+					cdev->delayed_status);
+		}
+	}
+
+	/* when we return, be sure our power usage is valid */
+	power = c->bMaxPower ? (2 * c->bMaxPower) : CONFIG_USB_GADGET_VBUS_DRAW;
+done:
+	usb_gadget_vbus_draw(gadget, power);
+	if (result >= 0 && cdev->delayed_status)
+		result = USB_GADGET_DELAYED_STATUS;
+	return result;
+}
+
+/**
+ * usb_add_config() - add a configuration to a device.
+ * @cdev: wraps the USB gadget
+ * @config: the configuration, with bConfigurationValue assigned
+ * @bind: the configuration's bind function
+ * Context: single threaded during gadget setup
+ *
+ * One of the main tasks of a composite @bind() routine is to
+ * add each of the configurations it supports, using this routine.
+ *
+ * This function returns the value of the configuration's @bind(), which
+ * is zero for success else a negative errno value.  Binding configurations
+ * assigns global resources including string IDs, and per-configuration
+ * resources such as interface IDs and endpoints.
+ */
+int usb_add_config(struct usb_composite_dev *cdev,
+		struct usb_configuration *config,
+		int (*bind)(struct usb_configuration *))
+{
+	int				status = -EINVAL;
+	struct usb_configuration	*c;
+
+	DBG(cdev, "adding config #%u '%s'/%p\n",
+			config->bConfigurationValue,
+			config->label, config);
+
+	if (!config->bConfigurationValue || !bind)
+		goto done;
+
+	/* Prevent duplicate configuration identifiers */
+	list_for_each_entry(c, &cdev->configs, list) {
+		if (c->bConfigurationValue == config->bConfigurationValue) {
+			status = -EBUSY;
+			goto done;
+		}
+	}
+
+	config->cdev = cdev;
+	list_add_tail(&config->list, &cdev->configs);
+
+	INIT_LIST_HEAD(&config->functions);
+	config->next_interface_id = 0;
+	memset(config->interface, 0, sizeof(config->interface));
+
+	status = bind(config);
+	if (status < 0) {
+		while (!list_empty(&config->functions)) {
+			struct usb_function		*f;
+
+			f = list_first_entry(&config->functions,
+					struct usb_function, list);
+			list_del(&f->list);
+			if (f->unbind) {
+				DBG(cdev, "unbind function '%s'/%p\n",
+					f->name, f);
+				f->unbind(config, f);
+				/* may free memory for "f" */
+			}
+		}
+		list_del(&config->list);
+		config->cdev = NULL;
+	} else {
+		unsigned	i;
+
+		DBG(cdev, "cfg %d/%p speeds:%s%s%s\n",
+			config->bConfigurationValue, config,
+			config->superspeed ? " super" : "",
+			config->highspeed ? " high" : "",
+			config->fullspeed
+				? (gadget_is_dualspeed(cdev->gadget)
+					? " full"
+					: " full/low")
+				: "");
+
+		for (i = 0; i < MAX_CONFIG_INTERFACES; i++) {
+			struct usb_function	*f = config->interface[i];
+
+			if (!f)
+				continue;
+			DBG(cdev, "  interface %d = %s/%p\n",
+				i, f->name, f);
+		}
+	}
+
+	/* set_alt(), or next bind(), sets up
+	 * ep->driver_data as needed.
+	 */
+	usb_ep_autoconfig_reset(cdev->gadget);
+
+done:
+	if (status)
+		DBG(cdev, "added config '%s'/%u --> %d\n", config->label,
+				config->bConfigurationValue, status);
+	return status;
+}
+
+static void remove_config(struct usb_composite_dev *cdev,
+			      struct usb_configuration *config)
+{
+	while (!list_empty(&config->functions)) {
+		struct usb_function		*f;
+
+		f = list_first_entry(&config->functions,
+				struct usb_function, list);
+		list_del(&f->list);
+		if (f->unbind) {
+			DBG(cdev, "unbind function '%s'/%p\n", f->name, f);
+			f->unbind(config, f);
+			/* may free memory for "f" */
+		}
+	}
+	list_del(&config->list);
+	if (config->unbind) {
+		DBG(cdev, "unbind config '%s'/%p\n", config->label, config);
+		config->unbind(config);
+			/* may free memory for "c" */
+	}
+}
+
+/**
+ * usb_remove_config() - remove a configuration from a device.
+ * @cdev: wraps the USB gadget
+ * @config: the configuration
+ *
+ * Drivers must call usb_gadget_disconnect before calling this function
+ * to disconnect the device from the host and make sure the host will not
+ * try to enumerate the device while we are changing the config list.
+ */
+void usb_remove_config(struct usb_composite_dev *cdev,
+		      struct usb_configuration *config)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->config == config)
+		reset_config(cdev);
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	remove_config(cdev, config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* We support strings in multiple languages ... string descriptor zero
+ * says which languages are supported.  The typical case will be that
+ * only one language (probably English) is used, with I18N handled on
+ * the host side.
+ */
+
+static void collect_langs(struct usb_gadget_strings **sp, __le16 *buf)
+{
+	const struct usb_gadget_strings	*s;
+	__le16				language;
+	__le16				*tmp;
+
+	while (*sp) {
+		s = *sp;
+		language = cpu_to_le16(s->language);
+		for (tmp = buf; *tmp && tmp < &buf[126]; tmp++) {
+			if (*tmp == language)
+				goto repeat;
+		}
+		*tmp++ = language;
+repeat:
+		sp++;
+	}
+}
+
+static int lookup_string(
+	struct usb_gadget_strings	**sp,
+	void				*buf,
+	u16				language,
+	int				id
+)
+{
+	struct usb_gadget_strings	*s;
+	int				value;
+
+	while (*sp) {
+		s = *sp++;
+		if (s->language != language)
+			continue;
+		value = usb_gadget_get_string(s, id, buf);
+		if (value > 0)
+			return value;
+	}
+	return -EINVAL;
+}
+
+static int get_string(struct usb_composite_dev *cdev,
+		void *buf, u16 language, int id)
+{
+	struct usb_configuration	*c;
+	struct usb_function		*f;
+	int				len;
+	const char			*str;
+
+	/* Yes, not only is USB's I18N support probably more than most
+	 * folk will ever care about ... also, it's all supported here.
+	 * (Except for UTF8 support for Unicode's "Astral Planes".)
+	 */
+
+	/* 0 == report all available language codes */
+	if (id == 0) {
+		struct usb_string_descriptor	*s = buf;
+		struct usb_gadget_strings	**sp;
+
+		memset(s, 0, 256);
+		s->bDescriptorType = USB_DT_STRING;
+
+		sp = composite->strings;
+		if (sp)
+			collect_langs(sp, s->wData);
+
+		list_for_each_entry(c, &cdev->configs, list) {
+			sp = c->strings;
+			if (sp)
+				collect_langs(sp, s->wData);
+
+			list_for_each_entry(f, &c->functions, list) {
+				sp = f->strings;
+				if (sp)
+					collect_langs(sp, s->wData);
+			}
+		}
+
+		for (len = 0; len <= 126 && s->wData[len]; len++)
+			continue;
+		if (!len)
+			return -EINVAL;
+
+		s->bLength = 2 * (len + 1);
+		return s->bLength;
+	}
+
+	/* Otherwise, look up and return a specified string.  First
+	 * check if the string has not been overridden.
+	 */
+	if (cdev->manufacturer_override == id)
+		str = iManufacturer ?: composite->iManufacturer ?:
+			composite_manufacturer;
+	else if (cdev->product_override == id)
+		str = iProduct ?: composite->iProduct;
+	else if (cdev->serial_override == id)
+		str = iSerialNumber ?: composite->iSerialNumber;
+	else
+		str = NULL;
+	if (str) {
+		struct usb_gadget_strings strings = {
+			.language = language,
+			.strings  = &(struct usb_string) { 0xff, str }
+		};
+		return usb_gadget_get_string(&strings, 0xff, buf);
+	}
+
+	/* String IDs are device-scoped, so we look up each string
+	 * table we're told about.  These lookups are infrequent;
+	 * simpler-is-better here.
+	 */
+	if (composite->strings) {
+		len = lookup_string(composite->strings, buf, language, id);
+		if (len > 0)
+			return len;
+	}
+	list_for_each_entry(c, &cdev->configs, list) {
+		if (c->strings) {
+			len = lookup_string(c->strings, buf, language, id);
+			if (len > 0)
+				return len;
+		}
+		list_for_each_entry(f, &c->functions, list) {
+			if (!f->strings)
+				continue;
+			len = lookup_string(f->strings, buf, language, id);
+			if (len > 0)
+				return len;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * usb_string_id() - allocate an unused string ID
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * Context: single threaded during gadget setup
+ *
+ * @usb_string_id() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then store that ID in the appropriate descriptors and string table.
+ *
+ * All string identifier should be allocated using this,
+ * @usb_string_ids_tab() or @usb_string_ids_n() routine, to ensure
+ * that for example different functions don't wrongly assign different
+ * meanings to the same identifier.
+ */
+int usb_string_id(struct usb_composite_dev *cdev)
+{
+	if (cdev->next_string_id < 254) {
+		/* string id 0 is reserved by USB spec for list of
+		 * supported languages */
+		/* 255 reserved as well? -- mina86 */
+		cdev->next_string_id++;
+		return cdev->next_string_id;
+	}
+	return -ENODEV;
+}
+
+/**
+ * usb_string_ids() - allocate unused string IDs in batch
+ * @cdev: the device whose string descriptor IDs are being allocated
+ * @str: an array of usb_string objects to assign numbers to
+ * Context: single threaded during gadget setup
+ *
+ * @usb_string_ids() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then copy IDs from the string table to the appropriate descriptors
+ * and string table for other languages.
+ *
+ * All string identifier should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_tab(struct usb_composite_dev *cdev, struct usb_string *str)
+{
+	int next = cdev->next_string_id;
+
+	for (; str->s; ++str) {
+		if (unlikely(next >= 254))
+			return -ENODEV;
+		str->id = ++next;
+	}
+
+	cdev->next_string_id = next;
+
+	return 0;
+}
+
+/**
+ * usb_string_ids_n() - allocate unused string IDs in batch
+ * @c: the device whose string descriptor IDs are being allocated
+ * @n: number of string IDs to allocate
+ * Context: single threaded during gadget setup
+ *
+ * Returns the first requested ID.  This ID and next @n-1 IDs are now
+ * valid IDs.  At least provided that @n is non-zero because if it
+ * is, returns last requested ID which is now very useful information.
+ *
+ * @usb_string_ids_n() is called from bind() callbacks to allocate
+ * string IDs.  Drivers for functions, configurations, or gadgets will
+ * then store that ID in the appropriate descriptors and string table.
+ *
+ * All string identifier should be allocated using this,
+ * @usb_string_id() or @usb_string_ids_n() routine, to ensure that for
+ * example different functions don't wrongly assign different meanings
+ * to the same identifier.
+ */
+int usb_string_ids_n(struct usb_composite_dev *c, unsigned n)
+{
+	unsigned next = c->next_string_id;
+	if (unlikely(n > 254 || (unsigned)next + n > 254))
+		return -ENODEV;
+	c->next_string_id += n;
+	return next + 1;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void composite_setup_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	if (req->status || req->actual != req->length)
+		DBG((struct usb_composite_dev *) ep->driver_data,
+				"setup complete --> %d, %d/%d\n",
+				req->status, req->actual, req->length);
+}
+
+/*
+ * The setup() callback implements all the ep0 functionality that's
+ * not handled lower down, in hardware or the hardware driver (like
+ * device and endpoint feature flags, and their status).  It's all
+ * housekeeping for the gadget function we're implementing.  Most of
+ * the work is in config and function specific setup.
+ */
+static int
+composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_request		*req = cdev->req;
+	int				value = -EOPNOTSUPP;
+	int				status = 0;
+	u16				w_index = le16_to_cpu(ctrl->wIndex);
+	u8				intf = w_index & 0xFF;
+	u16				w_value = le16_to_cpu(ctrl->wValue);
+	u16				w_length = le16_to_cpu(ctrl->wLength);
+	struct usb_function		*f = NULL;
+	u8				endp;
+
+	/* partial re-init of the response message; the function or the
+	 * gadget might need to intercept e.g. a control-OUT completion
+	 * when we delegate to it.
+	 */
+	req->zero = 0;
+	req->complete = composite_setup_complete;
+	req->length = 0;
+	gadget->ep0->driver_data = cdev;
+
+	switch (ctrl->bRequest) {
+
+	/* we handle all standard USB descriptors */
+	case USB_REQ_GET_DESCRIPTOR:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		switch (w_value >> 8) {
+
+		case USB_DT_DEVICE:
+			cdev->desc.bNumConfigurations =
+				count_configs(cdev, USB_DT_DEVICE);
+			cdev->desc.bMaxPacketSize0 =
+				cdev->gadget->ep0->maxpacket;
+			if (gadget_is_superspeed(gadget)) {
+				if (gadget->speed >= USB_SPEED_SUPER) {
+					cdev->desc.bcdUSB = cpu_to_le16(0x0300);
+					cdev->desc.bMaxPacketSize0 = 9;
+				} else {
+					cdev->desc.bcdUSB = cpu_to_le16(0x0210);
+				}
+			}
+
+			value = min(w_length, (u16) sizeof cdev->desc);
+			memcpy(req->buf, &cdev->desc, value);
+			break;
+		case USB_DT_DEVICE_QUALIFIER:
+			if (!gadget_is_dualspeed(gadget) ||
+			    gadget->speed >= USB_SPEED_SUPER)
+				break;
+			device_qual(cdev);
+			value = min_t(int, w_length,
+				sizeof(struct usb_qualifier_descriptor));
+			break;
+		case USB_DT_OTHER_SPEED_CONFIG:
+			if (!gadget_is_dualspeed(gadget) ||
+			    gadget->speed >= USB_SPEED_SUPER)
+				break;
+			/* FALLTHROUGH */
+		case USB_DT_CONFIG:
+			value = config_desc(cdev, w_value);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+		case USB_DT_STRING:
+			value = get_string(cdev, req->buf,
+					w_index, w_value & 0xff);
+			if (value >= 0)
+				value = min(w_length, (u16) value);
+			break;
+		case USB_DT_BOS:
+			if (gadget_is_superspeed(gadget)) {
+				value = bos_desc(cdev);
+				value = min(w_length, (u16) value);
+			}
+			break;
+		}
+		break;
+
+	/* any number of configs can work */
+	case USB_REQ_SET_CONFIGURATION:
+		if (ctrl->bRequestType != 0)
+			goto unknown;
+		if (gadget_is_otg(gadget)) {
+			if (gadget->a_hnp_support)
+				DBG(cdev, "HNP available\n");
+			else if (gadget->a_alt_hnp_support)
+				DBG(cdev, "HNP on another port\n");
+			else
+				VDBG(cdev, "HNP inactive\n");
+		}
+		spin_lock(&cdev->lock);
+		value = set_config(cdev, ctrl, w_value);
+		spin_unlock(&cdev->lock);
+		break;
+	case USB_REQ_GET_CONFIGURATION:
+		if (ctrl->bRequestType != USB_DIR_IN)
+			goto unknown;
+		if (cdev->config)
+			*(u8 *)req->buf = cdev->config->bConfigurationValue;
+		else
+			*(u8 *)req->buf = 0;
+		value = min(w_length, (u16) 1);
+		break;
+
+	/* function drivers must handle get/set altsetting; if there's
+	 * no get() method, we know only altsetting zero works.
+	 */
+	case USB_REQ_SET_INTERFACE:
+		if (ctrl->bRequestType != USB_RECIP_INTERFACE)
+			goto unknown;
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		if (w_value && !f->set_alt)
+			break;
+		value = f->set_alt(f, w_index, w_value);
+		if (value == USB_GADGET_DELAYED_STATUS) {
+			DBG(cdev,
+			 "%s: interface %d (%s) requested delayed status\n",
+					__func__, intf, f->name);
+			cdev->delayed_status++;
+			DBG(cdev, "delayed_status count %d\n",
+					cdev->delayed_status);
+		}
+		break;
+	case USB_REQ_GET_INTERFACE:
+		if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
+			goto unknown;
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		/* lots of interfaces only need altsetting zero... */
+		value = f->get_alt ? f->get_alt(f, w_index) : 0;
+		if (value < 0)
+			break;
+		*((u8 *)req->buf) = value;
+		value = min(w_length, (u16) 1);
+		break;
+
+	/*
+	 * USB 3.0 additions:
+	 * Function driver should handle get_status request. If such cb
+	 * wasn't supplied we respond with default value = 0
+	 * Note: function driver should supply such cb only for the first
+	 * interface of the function
+	 */
+	case USB_REQ_GET_STATUS:
+		if (!gadget_is_superspeed(gadget))
+			goto unknown;
+		if (ctrl->bRequestType != (USB_DIR_IN | USB_RECIP_INTERFACE))
+			goto unknown;
+		value = 2;	/* This is the length of the get_status reply */
+		put_unaligned_le16(0, req->buf);
+		if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+			break;
+		f = cdev->config->interface[intf];
+		if (!f)
+			break;
+		status = f->get_status ? f->get_status(f) : 0;
+		if (status < 0)
+			break;
+		put_unaligned_le16(status & 0x0000ffff, req->buf);
+		break;
+	/*
+	 * Function drivers should handle SetFeature/ClearFeature
+	 * (FUNCTION_SUSPEND) request. function_suspend cb should be supplied
+	 * only for the first interface of the function
+	 */
+	case USB_REQ_CLEAR_FEATURE:
+	case USB_REQ_SET_FEATURE:
+		if (!gadget_is_superspeed(gadget))
+			goto unknown;
+		if (ctrl->bRequestType != (USB_DIR_OUT | USB_RECIP_INTERFACE))
+			goto unknown;
+		switch (w_value) {
+		case USB_INTRF_FUNC_SUSPEND:
+			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
+			if (!f)
+				break;
+			value = 0;
+			if (f->func_suspend)
+				value = f->func_suspend(f, w_index >> 8);
+			if (value < 0) {
+				ERROR(cdev,
+				      "func_suspend() returned error %d\n",
+				      value);
+				value = 0;
+			}
+			break;
+		}
+		break;
+	default:
+unknown:
+		VDBG(cdev,
+			"non-core control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+		/* functions always handle their interfaces and endpoints...
+		 * punt other recipients (other, WUSB, ...) to the current
+		 * configuration code.
+		 *
+		 * REVISIT it could make sense to let the composite device
+		 * take such requests too, if that's ever needed:  to work
+		 * in config 0, etc.
+		 */
+		switch (ctrl->bRequestType & USB_RECIP_MASK) {
+		case USB_RECIP_INTERFACE:
+			if (!cdev->config || intf >= MAX_CONFIG_INTERFACES)
+				break;
+			f = cdev->config->interface[intf];
+			break;
+
+		case USB_RECIP_ENDPOINT:
+			endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
+			list_for_each_entry(f, &cdev->config->functions, list) {
+				if (test_bit(endp, f->endpoints))
+					break;
+			}
+			if (&f->list == &cdev->config->functions)
+				f = NULL;
+			break;
+		}
+
+		if (f && f->setup)
+			value = f->setup(f, ctrl);
+		else {
+			struct usb_configuration	*c;
+
+			c = cdev->config;
+			if (c && c->setup)
+				value = c->setup(c, ctrl);
+		}
+
+		goto done;
+	}
+
+	/* respond with data transfer before status phase? */
+	if (value >= 0 && value != USB_GADGET_DELAYED_STATUS) {
+		req->length = value;
+		req->zero = value < w_length;
+		value = usb_ep_queue(gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(cdev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			composite_setup_complete(gadget->ep0, req);
+		}
+	} else if (value == USB_GADGET_DELAYED_STATUS && w_length != 0) {
+		WARN(cdev,
+			"%s: Delayed status not supported for w_length != 0",
+			__func__);
+	}
+
+done:
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static void composite_disconnect(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	unsigned long			flags;
+
+	/* REVISIT:  should we have config and device level
+	 * disconnect callbacks?
+	 */
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		reset_config(cdev);
+	if (composite->disconnect)
+		composite->disconnect(cdev);
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t composite_show_suspended(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct usb_gadget *gadget = dev_to_usb_gadget(dev);
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+
+	return sprintf(buf, "%d\n", cdev->suspended);
+}
+
+static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+
+static void
+composite_unbind(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+
+	/* composite_disconnect() must already have been called
+	 * by the underlying peripheral controller driver!
+	 * so there's no i/o concurrency that could affect the
+	 * state protected by cdev->lock.
+	 */
+	WARN_ON(cdev->config);
+
+	while (!list_empty(&cdev->configs)) {
+		struct usb_configuration	*c;
+		c = list_first_entry(&cdev->configs,
+				struct usb_configuration, list);
+		remove_config(cdev, c);
+	}
+	if (composite->unbind)
+		composite->unbind(cdev);
+
+	if (cdev->req) {
+		kfree(cdev->req->buf);
+		usb_ep_free_request(gadget->ep0, cdev->req);
+	}
+	device_remove_file(&gadget->dev, &dev_attr_suspended);
+	kfree(cdev);
+	set_gadget_data(gadget, NULL);
+	composite = NULL;
+}
+
+static u8 override_id(struct usb_composite_dev *cdev, u8 *desc)
+{
+	if (!*desc) {
+		int ret = usb_string_id(cdev);
+		if (unlikely(ret < 0))
+			WARNING(cdev, "failed to override string ID\n");
+		else
+			*desc = ret;
+	}
+
+	return *desc;
+}
+
+static int composite_bind(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev;
+	int				status = -ENOMEM;
+
+	cdev = kzalloc(sizeof *cdev, GFP_KERNEL);
+	if (!cdev)
+		return status;
+
+	spin_lock_init(&cdev->lock);
+	cdev->gadget = gadget;
+	set_gadget_data(gadget, cdev);
+	INIT_LIST_HEAD(&cdev->configs);
+
+	/* preallocate control response and buffer */
+	cdev->req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL);
+	if (!cdev->req)
+		goto fail;
+	cdev->req->buf = kmalloc(USB_BUFSIZ, GFP_KERNEL);
+	if (!cdev->req->buf)
+		goto fail;
+	cdev->req->complete = composite_setup_complete;
+	gadget->ep0->driver_data = cdev;
+
+	cdev->bufsiz = USB_BUFSIZ;
+	cdev->driver = composite;
+
+	/*
+	 * As per USB compliance update, a device that is actively drawing
+	 * more than 100mA from USB must report itself as bus-powered in
+	 * the GetStatus(DEVICE) call.
+	 */
+	if (CONFIG_USB_GADGET_VBUS_DRAW <= USB_SELF_POWER_VBUS_MAX_DRAW)
+		usb_gadget_set_selfpowered(gadget);
+
+	/* interface and string IDs start at zero via kzalloc.
+	 * we force endpoints to start unassigned; few controller
+	 * drivers will zero ep->driver_data.
+	 */
+	usb_ep_autoconfig_reset(cdev->gadget);
+
+	/* composite gadget needs to assign strings for whole device (like
+	 * serial number), register function drivers, potentially update
+	 * power state and consumption, etc
+	 */
+	status = composite->bind(cdev);
+	if (status < 0)
+		goto fail;
+
+	cdev->desc = *composite->dev;
+
+	/* standardized runtime overrides for device ID data */
+	if (idVendor)
+		cdev->desc.idVendor = cpu_to_le16(idVendor);
+	else
+		idVendor = le16_to_cpu(cdev->desc.idVendor);
+	if (idProduct)
+		cdev->desc.idProduct = cpu_to_le16(idProduct);
+	else
+		idProduct = le16_to_cpu(cdev->desc.idProduct);
+	if (bcdDevice)
+		cdev->desc.bcdDevice = cpu_to_le16(bcdDevice);
+	else
+		bcdDevice = le16_to_cpu(cdev->desc.bcdDevice);
+
+	/* string overrides */
+	if (iManufacturer || !cdev->desc.iManufacturer) {
+		if (!iManufacturer && !composite->iManufacturer &&
+		    !*composite_manufacturer)
+			snprintf(composite_manufacturer,
+				 sizeof composite_manufacturer,
+				 "%s %s with %s",
+				 init_utsname()->sysname,
+				 init_utsname()->release,
+				 gadget->name);
+
+		cdev->manufacturer_override =
+			override_id(cdev, &cdev->desc.iManufacturer);
+	}
+
+	if (iProduct || (!cdev->desc.iProduct && composite->iProduct))
+		cdev->product_override =
+			override_id(cdev, &cdev->desc.iProduct);
+
+	if (iSerialNumber ||
+	    (!cdev->desc.iSerialNumber && composite->iSerialNumber))
+		cdev->serial_override =
+			override_id(cdev, &cdev->desc.iSerialNumber);
+
+	/* has userspace failed to provide a serial number? */
+	if (composite->needs_serial && !cdev->desc.iSerialNumber)
+		WARNING(cdev, "userspace failed to provide iSerialNumber\n");
+
+	/* finish up */
+	status = device_create_file(&gadget->dev, &dev_attr_suspended);
+	if (status)
+		goto fail;
+
+	INFO(cdev, "%s ready\n", composite->name);
+	return 0;
+
+fail:
+	composite_unbind(gadget);
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+composite_suspend(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_function		*f;
+
+	/* REVISIT:  should we have config level
+	 * suspend/resume callbacks?
+	 */
+	DBG(cdev, "suspend\n");
+	if (cdev->config) {
+		list_for_each_entry(f, &cdev->config->functions, list) {
+			if (f->suspend)
+				f->suspend(f);
+		}
+	}
+	if (composite->suspend)
+		composite->suspend(cdev);
+
+	cdev->suspended = 1;
+
+	usb_gadget_vbus_draw(gadget, 2);
+}
+
+static void
+composite_resume(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
+	struct usb_function		*f;
+	u8				maxpower;
+
+	/* REVISIT:  should we have config level
+	 * suspend/resume callbacks?
+	 */
+	DBG(cdev, "resume\n");
+	if (composite->resume)
+		composite->resume(cdev);
+	if (cdev->config) {
+		list_for_each_entry(f, &cdev->config->functions, list) {
+			if (f->resume)
+				f->resume(f);
+		}
+
+		maxpower = cdev->config->bMaxPower;
+
+		usb_gadget_vbus_draw(gadget, maxpower ?
+			(2 * maxpower) : CONFIG_USB_GADGET_VBUS_DRAW);
+	}
+
+	cdev->suspended = 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct usb_gadget_driver composite_driver = {
+	.bind		= composite_bind,
+	.unbind		= composite_unbind,
+
+	.setup		= composite_setup,
+	.disconnect	= composite_disconnect,
+
+	.suspend	= composite_suspend,
+	.resume		= composite_resume,
+
+	.driver	= {
+		.owner		= THIS_MODULE,
+	},
+};
+
+/**
+ * usb_composite_probe() - register a composite driver
+ * @driver: the driver to register
+ * @bind: the callback used to allocate resources that are shared across the
+ *	whole device, such as string IDs, and add its configurations using
+ *	@usb_add_config().  This may fail by returning a negative errno
+ *	value; it should return zero on successful initialization.
+ * Context: single threaded during gadget setup
+ *
+ * This function is used to register drivers using the composite driver
+ * framework.  The return value is zero, or a negative errno value.
+ * Those values normally come from the driver's @bind method, which does
+ * all the work of setting up the driver to match the hardware.
+ *
+ * On successful return, the gadget is ready to respond to requests from
+ * the host, unless one of its components invokes usb_gadget_disconnect()
+ * while it was binding.  That would usually be done in order to wait for
+ * some userspace participation.
+ */
+int usb_composite_probe(struct usb_composite_driver *driver)
+{
+	if (!driver || !driver->dev || composite || !driver->bind)
+		return -EINVAL;
+
+	if (!driver->name)
+		driver->name = "composite";
+	if (!driver->iProduct)
+		driver->iProduct = driver->name;
+	composite_driver.function =  (char *) driver->name;
+	composite_driver.driver.name = driver->name;
+	composite_driver.max_speed = driver->max_speed;
+	composite = driver;
+
+	return usb_gadget_probe_driver(&composite_driver);
+}
+
+/**
+ * usb_composite_unregister() - unregister a composite driver
+ * @driver: the driver to unregister
+ *
+ * This function is used to unregister drivers using the composite
+ * driver framework.
+ */
+void usb_composite_unregister(struct usb_composite_driver *driver)
+{
+	if (composite != driver)
+		return;
+	usb_gadget_unregister_driver(&composite_driver);
+}
+
+/**
+ * usb_composite_setup_continue() - Continue with the control transfer
+ * @cdev: the composite device whose control transfer was kept waiting
+ *
+ * This function must be called by the USB function driver to continue
+ * with the control transfer's data/status stage in case it had requested to
+ * delay the data/status stages. A USB function's setup handler (e.g. set_alt())
+ * can request the composite framework to delay the setup request's data/status
+ * stages by returning USB_GADGET_DELAYED_STATUS.
+ */
+void usb_composite_setup_continue(struct usb_composite_dev *cdev)
+{
+	int			value;
+	struct usb_request	*req = cdev->req;
+	unsigned long		flags;
+
+	DBG(cdev, "%s\n", __func__);
+	spin_lock_irqsave(&cdev->lock, flags);
+
+	if (cdev->delayed_status == 0) {
+		WARN(cdev, "%s: Unexpected call\n", __func__);
+
+	} else if (--cdev->delayed_status == 0) {
+		DBG(cdev, "%s: Completing delayed status\n", __func__);
+		req->length = 0;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0) {
+			DBG(cdev, "ep_queue --> %d\n", value);
+			req->status = 0;
+			composite_setup_complete(cdev->gadget->ep0, req);
+		}
+	}
+
+	spin_unlock_irqrestore(&cdev->lock, flags);
+}
+
diff --git a/drivers/staging/ccg/composite.h b/drivers/staging/ccg/composite.h
new file mode 100644
index 0000000..19a5adf
--- /dev/null
+++ b/drivers/staging/ccg/composite.h
@@ -0,0 +1,395 @@
+/*
+ * composite.h -- framework for usb gadgets which are composite devices
+ *
+ * Copyright (C) 2006-2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef	__LINUX_USB_COMPOSITE_H
+#define	__LINUX_USB_COMPOSITE_H
+
+/*
+ * This framework is an optional layer on top of the USB Gadget interface,
+ * making it easier to build (a) Composite devices, supporting multiple
+ * functions within any single configuration, and (b) Multi-configuration
+ * devices, also supporting multiple functions but without necessarily
+ * having more than one function per configuration.
+ *
+ * Example:  a device with a single configuration supporting both network
+ * link and mass storage functions is a composite device.  Those functions
+ * might alternatively be packaged in individual configurations, but in
+ * the composite model the host can use both functions at the same time.
+ */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+/*
+ * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
+ * wish to delay the data/status stages of the control transfer till they
+ * are ready. The control transfer will then be kept from completing till
+ * all the function drivers that requested USB_GADGET_DELAYED_STATUS
+ * invoke usb_composite_setup_continue().
+ */
+#define USB_GADGET_DELAYED_STATUS       0x7fff	/* Impossibly large value */
+
+struct usb_configuration;
+
+/**
+ * struct usb_function - describes one function of a configuration
+ * @name: For diagnostics, identifies the function.
+ * @strings: tables of strings, keyed by identifiers assigned during bind()
+ *	and by language IDs provided in control requests
+ * @descriptors: Table of full (or low) speed descriptors, using interface and
+ *	string identifiers assigned during @bind().  If this pointer is null,
+ *	the function will not be available at full speed (or at low speed).
+ * @hs_descriptors: Table of high speed descriptors, using interface and
+ *	string identifiers assigned during @bind().  If this pointer is null,
+ *	the function will not be available at high speed.
+ * @ss_descriptors: Table of super speed descriptors, using interface and
+ *	string identifiers assigned during @bind(). If this
+ *	pointer is null after initialization, the function will not
+ *	be available at super speed.
+ * @config: assigned when @usb_add_function() is called; this is the
+ *	configuration with which this function is associated.
+ * @bind: Before the gadget can register, all of its functions bind() to the
+ *	available resources including string and interface identifiers used
+ *	in interface or class descriptors; endpoints; I/O buffers; and so on.
+ * @unbind: Reverses @bind; called as a side effect of unregistering the
+ *	driver which added this function.
+ * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may
+ *	initialize usb_ep.driver data at this time (when it is used).
+ *	Note that setting an interface to its current altsetting resets
+ *	interface state, and that all interfaces have a disabled state.
+ * @get_alt: Returns the active altsetting.  If this is not provided,
+ *	then only altsetting zero is supported.
+ * @disable: (REQUIRED) Indicates the function should be disabled.  Reasons
+ *	include host resetting or reconfiguring the gadget, and disconnection.
+ * @setup: Used for interface-specific control requests.
+ * @suspend: Notifies functions when the host stops sending USB traffic.
+ * @resume: Notifies functions when the host restarts USB traffic.
+ * @get_status: Returns function status as a reply to
+ *	GetStatus() request when the recipient is Interface.
+ * @func_suspend: callback to be called when
+ *	SetFeature(FUNCTION_SUSPEND) is received
+ *
+ * A single USB function uses one or more interfaces, and should in most
+ * cases support operation at both full and high speeds.  Each function is
+ * associated by @usb_add_function() with one configuration; that function
+ * causes @bind() to be called so resources can be allocated as part of
+ * setting up a gadget driver.  Those resources include endpoints, which
+ * should be allocated using @usb_ep_autoconfig().
+ *
+ * To support dual speed operation, a function driver provides descriptors
+ * for both high and full speed operation.  Except in rare cases that don't
+ * involve bulk endpoints, each speed needs different endpoint descriptors.
+ *
+ * Function drivers choose their own strategies for managing instance data.
+ * The simplest strategy just declares it "static", which means the function
+ * can only be activated once.  If the function needs to be exposed in more
+ * than one configuration at a given speed, it needs to support multiple
+ * usb_function structures (one for each configuration).
+ *
+ * A more complex strategy might encapsulate a @usb_function structure inside
+ * a driver-specific instance structure to allow multiple activations.  An
+ * example of multiple activations might be a CDC ACM function that supports
+ * two or more distinct instances within the same configuration, providing
+ * several independent logical data links to a USB host.
+ */
+struct usb_function {
+	const char			*name;
+	struct usb_gadget_strings	**strings;
+	struct usb_descriptor_header	**descriptors;
+	struct usb_descriptor_header	**hs_descriptors;
+	struct usb_descriptor_header	**ss_descriptors;
+
+	struct usb_configuration	*config;
+
+	/* REVISIT:  bind() functions can be marked __init, which
+	 * makes trouble for section mismatch analysis.  See if
+	 * we can't restructure things to avoid mismatching.
+	 * Related:  unbind() may kfree() but bind() won't...
+	 */
+
+	/* configuration management:  bind/unbind */
+	int			(*bind)(struct usb_configuration *,
+					struct usb_function *);
+	void			(*unbind)(struct usb_configuration *,
+					struct usb_function *);
+
+	/* runtime state management */
+	int			(*set_alt)(struct usb_function *,
+					unsigned interface, unsigned alt);
+	int			(*get_alt)(struct usb_function *,
+					unsigned interface);
+	void			(*disable)(struct usb_function *);
+	int			(*setup)(struct usb_function *,
+					const struct usb_ctrlrequest *);
+	void			(*suspend)(struct usb_function *);
+	void			(*resume)(struct usb_function *);
+
+	/* USB 3.0 additions */
+	int			(*get_status)(struct usb_function *);
+	int			(*func_suspend)(struct usb_function *,
+						u8 suspend_opt);
+	/* private: */
+	/* internals */
+	struct list_head		list;
+	DECLARE_BITMAP(endpoints, 32);
+};
+
+int usb_add_function(struct usb_configuration *, struct usb_function *);
+
+int usb_function_deactivate(struct usb_function *);
+int usb_function_activate(struct usb_function *);
+
+int usb_interface_id(struct usb_configuration *, struct usb_function *);
+
+int config_ep_by_speed(struct usb_gadget *g, struct usb_function *f,
+			struct usb_ep *_ep);
+
+#define	MAX_CONFIG_INTERFACES		16	/* arbitrary; max 255 */
+
+/**
+ * struct usb_configuration - represents one gadget configuration
+ * @label: For diagnostics, describes the configuration.
+ * @strings: Tables of strings, keyed by identifiers assigned during @bind()
+ *	and by language IDs provided in control requests.
+ * @descriptors: Table of descriptors preceding all function descriptors.
+ *	Examples include OTG and vendor-specific descriptors.
+ * @unbind: Reverses @bind; called as a side effect of unregistering the
+ *	driver which added this configuration.
+ * @setup: Used to delegate control requests that aren't handled by standard
+ *	device infrastructure or directed at a specific interface.
+ * @bConfigurationValue: Copied into configuration descriptor.
+ * @iConfiguration: Copied into configuration descriptor.
+ * @bmAttributes: Copied into configuration descriptor.
+ * @bMaxPower: Copied into configuration descriptor.
+ * @cdev: assigned by @usb_add_config() before calling @bind(); this is
+ *	the device associated with this configuration.
+ *
+ * Configurations are building blocks for gadget drivers structured around
+ * function drivers.  Simple USB gadgets require only one function and one
+ * configuration, and handle dual-speed hardware by always providing the same
+ * functionality.  Slightly more complex gadgets may have more than one
+ * single-function configuration at a given speed; or have configurations
+ * that only work at one speed.
+ *
+ * Composite devices are, by definition, ones with configurations which
+ * include more than one function.
+ *
+ * The lifecycle of a usb_configuration includes allocation, initialization
+ * of the fields described above, and calling @usb_add_config() to set up
+ * internal data and bind it to a specific device.  The configuration's
+ * @bind() method is then used to initialize all the functions and then
+ * call @usb_add_function() for them.
+ *
+ * Those functions would normally be independent of each other, but that's
+ * not mandatory.  CDC WMC devices are an example where functions often
+ * depend on other functions, with some functions subsidiary to others.
+ * Such interdependency may be managed in any way, so long as all of the
+ * descriptors complete by the time the composite driver returns from
+ * its bind() routine.
+ */
+struct usb_configuration {
+	const char			*label;
+	struct usb_gadget_strings	**strings;
+	const struct usb_descriptor_header **descriptors;
+
+	/* REVISIT:  bind() functions can be marked __init, which
+	 * makes trouble for section mismatch analysis.  See if
+	 * we can't restructure things to avoid mismatching...
+	 */
+
+	/* configuration management: unbind/setup */
+	void			(*unbind)(struct usb_configuration *);
+	int			(*setup)(struct usb_configuration *,
+					const struct usb_ctrlrequest *);
+
+	/* fields in the config descriptor */
+	u8			bConfigurationValue;
+	u8			iConfiguration;
+	u8			bmAttributes;
+	u8			bMaxPower;
+
+	struct usb_composite_dev	*cdev;
+
+	/* private: */
+	/* internals */
+	struct list_head	list;
+	struct list_head	functions;
+	u8			next_interface_id;
+	unsigned		superspeed:1;
+	unsigned		highspeed:1;
+	unsigned		fullspeed:1;
+	struct usb_function	*interface[MAX_CONFIG_INTERFACES];
+};
+
+int usb_add_config(struct usb_composite_dev *,
+		struct usb_configuration *,
+		int (*)(struct usb_configuration *));
+
+void usb_remove_config(struct usb_composite_dev *,
+		struct usb_configuration *);
+
+/**
+ * struct usb_composite_driver - groups configurations into a gadget
+ * @name: For diagnostics, identifies the driver.
+ * @iProduct: Used as iProduct override if @dev->iProduct is not set.
+ *	If NULL, the value of @name is used.
+ * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is
+ *	not set. If NULL, a default "<system> <release> with <udc>" value
+ *	will be used.
+ * @iSerialNumber: Used as iSerialNumber override if @dev->iSerialNumber is
+ *	not set.
+ * @dev: Template descriptor for the device, including default device
+ *	identifiers.
+ * @strings: tables of strings, keyed by identifiers assigned during @bind
+ *	and language IDs provided in control requests
+ * @max_speed: Highest speed the driver supports.
+ * @needs_serial: set to 1 if the gadget needs userspace to provide
+ * 	a serial number.  If one is not provided, a warning will be printed.
+ * @bind: (REQUIRED) Used to allocate resources that are shared across the
+ *	whole device, such as string IDs, and add its configurations using
+ *	@usb_add_config(). This may fail by returning a negative errno
+ *	value; it should return zero on successful initialization.
+ * @unbind: Reverses @bind; called as a side effect of unregistering
+ *	this driver.
+ * @disconnect: optional driver disconnect method
+ * @suspend: Notifies when the host stops sending USB traffic,
+ *	after function notifications
+ * @resume: Notifies configuration when the host restarts USB traffic,
+ *	before function notifications
+ *
+ * Devices default to reporting self powered operation.  Devices which rely
+ * on bus powered operation should report this in their @bind method.
+ *
+ * Before returning from @bind, various fields in the template descriptor
+ * may be overridden.  These include the idVendor/idProduct/bcdDevice values
+ * normally used to bind the appropriate host side driver, and the three strings
+ * (iManufacturer, iProduct, iSerialNumber) normally used to provide user
+ * meaningful device identifiers.  (The strings will not be defined unless
+ * they are defined in @dev and @strings.)  The correct ep0 maxpacket size
+ * is also reported, as defined by the underlying controller driver.
+ */
+struct usb_composite_driver {
+	const char				*name;
+	const char				*iProduct;
+	const char				*iManufacturer;
+	const char				*iSerialNumber;
+	const struct usb_device_descriptor	*dev;
+	struct usb_gadget_strings		**strings;
+	enum usb_device_speed			max_speed;
+	unsigned		needs_serial:1;
+
+	int			(*bind)(struct usb_composite_dev *cdev);
+	int			(*unbind)(struct usb_composite_dev *);
+
+	void			(*disconnect)(struct usb_composite_dev *);
+
+	/* global suspend hooks */
+	void			(*suspend)(struct usb_composite_dev *);
+	void			(*resume)(struct usb_composite_dev *);
+};
+
+extern int usb_composite_probe(struct usb_composite_driver *driver);
+extern void usb_composite_unregister(struct usb_composite_driver *driver);
+extern void usb_composite_setup_continue(struct usb_composite_dev *cdev);
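+
+/*
+ * Illustrative sketch (not part of the original sources) of a minimal
+ * composite driver built around the structure documented above, reusing
+ * the hypothetical my_config/my_config_bind from the previous sketch.
+ * The vendor/product IDs are placeholders.
+ *
+ *	static struct usb_device_descriptor my_device_desc = {
+ *		.bLength		= sizeof my_device_desc,
+ *		.bDescriptorType	= USB_DT_DEVICE,
+ *		.bcdUSB			= cpu_to_le16(0x0200),
+ *		.bDeviceClass		= USB_CLASS_PER_INTERFACE,
+ *		.idVendor		= cpu_to_le16(0x0525),
+ *		.idProduct		= cpu_to_le16(0xa4a7),
+ *	};
+ *
+ *	static int my_bind(struct usb_composite_dev *cdev)
+ *	{
+ *		return usb_add_config(cdev, &my_config, my_config_bind);
+ *	}
+ *
+ *	static struct usb_composite_driver my_driver = {
+ *		.name		= "g_example",
+ *		.dev		= &my_device_desc,
+ *		.max_speed	= USB_SPEED_HIGH,
+ *		.bind		= my_bind,
+ *	};
+ *
+ * module_init() would then call usb_composite_probe(&my_driver) and
+ * module_exit() would call usb_composite_unregister(&my_driver).
+ */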
+
+
+/**
+ * struct usb_composite_dev - represents one composite usb gadget
+ * @gadget: read-only, abstracts the gadget's usb peripheral controller
+ * @req: used for control responses; buffer is pre-allocated
+ * @bufsiz: size of buffer pre-allocated in @req
+ * @config: the currently active configuration
+ *
+ * One of these devices is allocated and initialized before the
+ * associated device driver's bind() is called.
+ *
+ * OPEN ISSUE:  it appears that some WUSB devices will need to be
+ * built by combining a normal (wired) gadget with a wireless one.
+ * This revision of the gadget framework should probably try to make
+ * sure doing that won't hurt too much.
+ *
+ * One notion for how to handle Wireless USB devices involves:
+ * (a) a second gadget here, discovery mechanism TBD, but likely
+ *     needing separate "register/unregister WUSB gadget" calls;
+ * (b) updates to usb_gadget to include flags "is it wireless",
+ *     "is it wired", plus (presumably in a wrapper structure)
+ *     bandgroup and PHY info;
+ * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
+ *     wireless-specific parameters like maxburst and maxsequence;
+ * (d) configurations that are specific to wireless links;
+ * (e) function drivers that understand wireless configs and will
+ *     support wireless for (additional) function instances;
+ * (f) a function to support association setup (like CBAF), not
+ *     necessarily requiring a wireless adapter;
+ * (g) composite device setup that can create one or more wireless
+ *     configs, including appropriate association setup support;
+ * (h) more, TBD.
+ */
+struct usb_composite_dev {
+	struct usb_gadget		*gadget;
+	struct usb_request		*req;
+	unsigned			bufsiz;
+
+	struct usb_configuration	*config;
+
+	/* private: */
+	/* internals */
+	unsigned int			suspended:1;
+	struct usb_device_descriptor	desc;
+	struct list_head		configs;
+	struct usb_composite_driver	*driver;
+	u8				next_string_id;
+	u8				manufacturer_override;
+	u8				product_override;
+	u8				serial_override;
+
+	/* the gadget driver won't enable the data pullup
+	 * while the deactivation count is nonzero.
+	 */
+	unsigned			deactivations;
+
+	/* the composite driver won't complete the control transfer's
+	 * data/status stages till delayed_status is zero.
+	 */
+	int				delayed_status;
+
+	/* protects deactivations and delayed_status counts */
+	spinlock_t			lock;
+};
+
+extern int usb_string_id(struct usb_composite_dev *c);
+extern int usb_string_ids_tab(struct usb_composite_dev *c,
+			      struct usb_string *str);
+extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
+
+
+/* messaging utils */
+#define DBG(d, fmt, args...) \
+	dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...) \
+	dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...) \
+	dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) \
+	dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...) \
+	dev_info(&(d)->gadget->dev , fmt , ## args)
+
+#endif	/* __LINUX_USB_COMPOSITE_H */
diff --git a/drivers/staging/ccg/config.c b/drivers/staging/ccg/config.c
new file mode 100644
index 0000000..7542a72
--- /dev/null
+++ b/drivers/staging/ccg/config.c
@@ -0,0 +1,158 @@
+/*
+ * usb/gadget/config.c -- simplify building config descriptors
+ *
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+
+/**
+ * usb_descriptor_fillbuf - fill buffer with descriptors
+ * @buf: Buffer to be filled
+ * @buflen: Size of buf
+ * @src: Array of descriptor pointers, terminated by a null pointer.
+ *
+ * Copies descriptors into the buffer, returning the length or a
+ * negative error code if they can't all be copied.  Useful when
+ * assembling descriptors for an associated set of interfaces used
+ * as part of configuring a composite device; or in other cases where
+ * sets of descriptors need to be marshaled.
+ */
+int
+usb_descriptor_fillbuf(void *buf, unsigned buflen,
+		const struct usb_descriptor_header **src)
+{
+	u8	*dest = buf;
+
+	if (!src)
+		return -EINVAL;
+
+	/* fill buffer from src[] until null descriptor ptr */
+	for (; NULL != *src; src++) {
+		unsigned		len = (*src)->bLength;
+
+		if (len > buflen)
+			return -EINVAL;
+		memcpy(dest, *src, len);
+		buflen -= len;
+		dest += len;
+	}
+	return dest - (u8 *)buf;
+}
+
+
+/**
+ * usb_gadget_config_buf - builds a complete configuration descriptor
+ * @config: Header for the descriptor, including characteristics such
+ *	as power requirements and number of interfaces.
+ * @desc: Null-terminated vector of pointers to the descriptors (interface,
+ *	endpoint, etc) defining all functions in this device configuration.
+ * @buf: Buffer for the resulting configuration descriptor.
+ * @length: Length of buffer.  If this is not big enough to hold the
+ *	entire configuration descriptor, an error code will be returned.
+ *
+ * This copies descriptors into the response buffer, building a descriptor
+ * for that configuration.  It returns the buffer length or a negative
+ * status code.  The config.wTotalLength field is set to match the length
+ * of the result, but other descriptor fields (including power usage and
+ * interface count) must be set by the caller.
+ *
+ * Gadget drivers could use this when constructing a config descriptor
+ * in response to USB_REQ_GET_DESCRIPTOR.  They will need to patch the
+ * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed.
+ */
+int usb_gadget_config_buf(
+	const struct usb_config_descriptor	*config,
+	void					*buf,
+	unsigned				length,
+	const struct usb_descriptor_header	**desc
+)
+{
+	struct usb_config_descriptor		*cp = buf;
+	int					len;
+
+	/* config descriptor first */
+	if (length < USB_DT_CONFIG_SIZE || !desc)
+		return -EINVAL;
+	*cp = *config;
+
+	/* then interface/endpoint/class/vendor/... */
+	len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8 *)buf,
+			length - USB_DT_CONFIG_SIZE, desc);
+	if (len < 0)
+		return len;
+	len += USB_DT_CONFIG_SIZE;
+	if (len > 0xffff)
+		return -EINVAL;
+
+	/* patch up the config descriptor */
+	cp->bLength = USB_DT_CONFIG_SIZE;
+	cp->bDescriptorType = USB_DT_CONFIG;
+	cp->wTotalLength = cpu_to_le16(len);
+	cp->bmAttributes |= USB_CONFIG_ATT_ONE;
+	return len;
+}
+
+/**
+ * usb_copy_descriptors - copy a vector of USB descriptors
+ * @src: null-terminated vector to copy
+ * Context: initialization code, which may sleep
+ *
+ * This makes a copy of a vector of USB descriptors.  Its primary use
+ * is to support usb_function objects which can have multiple copies,
+ * each needing different descriptors.  Functions may have static
+ * tables of descriptors, which are used as templates and customized
+ * with identifiers (for interfaces, strings, endpoints, and more)
+ * as needed by a given function instance.
+ */
+struct usb_descriptor_header **
+usb_copy_descriptors(struct usb_descriptor_header **src)
+{
+	struct usb_descriptor_header **tmp;
+	unsigned bytes;
+	unsigned n_desc;
+	void *mem;
+	struct usb_descriptor_header **ret;
+
+	/* count descriptors and their sizes; then add vector size */
+	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
+		bytes += (*tmp)->bLength;
+	bytes += (n_desc + 1) * sizeof(*tmp);
+
+	mem = kmalloc(bytes, GFP_KERNEL);
+	if (!mem)
+		return NULL;
+
+	/* fill in pointers starting at "tmp",
+	 * to descriptors copied starting at "mem";
+	 * and return "ret"
+	 */
+	tmp = mem;
+	ret = mem;
+	mem += (n_desc + 1) * sizeof(*tmp);
+	while (*src) {
+		memcpy(mem, *src, (*src)->bLength);
+		*tmp = mem;
+		tmp++;
+		mem += (*src)->bLength;
+		src++;
+	}
+	*tmp = NULL;
+
+	return ret;
+}
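+
+/*
+ * Illustrative sketch (not part of the original sources) of the usage
+ * pattern described above: a function driver keeps a static, null
+ * terminated template table and copies it in its bind() once interface
+ * numbers and endpoint addresses have been patched in.  The my_* names
+ * are hypothetical; f_acm.c below follows the same pattern.
+ *
+ *	static struct usb_descriptor_header *my_fs_function[] = {
+ *		(struct usb_descriptor_header *) &my_intf_desc,
+ *		(struct usb_descriptor_header *) &my_fs_in_desc,
+ *		(struct usb_descriptor_header *) &my_fs_out_desc,
+ *		NULL,
+ *	};
+ *
+ *	... in the function's bind() callback:
+ *	f->descriptors = usb_copy_descriptors(my_fs_function);
+ *	if (!f->descriptors)
+ *		return -ENOMEM;
+ */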
+
diff --git a/drivers/staging/ccg/epautoconf.c b/drivers/staging/ccg/epautoconf.c
new file mode 100644
index 0000000..51f3d42
--- /dev/null
+++ b/drivers/staging/ccg/epautoconf.c
@@ -0,0 +1,393 @@
+/*
+ * epautoconf.c -- endpoint autoconfiguration for usb gadget drivers
+ *
+ * Copyright (C) 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+#include "gadget_chips.h"
+
+
+/* we must assign addresses for configurable endpoints (like net2280) */
+static unsigned epnum;
+
+// #define MANY_ENDPOINTS
+#ifdef MANY_ENDPOINTS
+/* more than 15 configurable endpoints */
+static unsigned in_epnum;
+#endif
+
+
+/*
+ * This should work with endpoints from controller drivers sharing the
+ * same endpoint naming convention.  By example:
+ *
+ *	- ep1, ep2, ... address is fixed, not direction or type
+ *	- ep1in, ep2out, ... address and direction are fixed, not type
+ *	- ep1-bulk, ep2-bulk, ... address and type are fixed, not direction
+ *	- ep1in-bulk, ep2out-iso, ... all three are fixed
+ *	- ep-* ... no functionality restrictions
+ *
+ * Type suffixes are "-bulk", "-iso", or "-int".  Numbers are decimal.
+ * Less common restrictions are implied by gadget_is_*().
+ *
+ * NOTE:  each endpoint is unidirectional, as specified by its USB
+ * descriptor; and isn't specific to a configuration or altsetting.
+ */
+static int
+ep_matches (
+	struct usb_gadget		*gadget,
+	struct usb_ep			*ep,
+	struct usb_endpoint_descriptor	*desc,
+	struct usb_ss_ep_comp_descriptor *ep_comp
+)
+{
+	u8		type;
+	const char	*tmp;
+	u16		max;
+
+	int		num_req_streams = 0;
+
+	/* endpoint already claimed? */
+	if (NULL != ep->driver_data)
+		return 0;
+
+	/* only support ep0 for portable CONTROL traffic */
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+	if (USB_ENDPOINT_XFER_CONTROL == type)
+		return 0;
+
+	/* some other naming convention */
+	if ('e' != ep->name[0])
+		return 0;
+
+	/* type-restriction:  "-iso", "-bulk", or "-int".
+	 * direction-restriction:  "in", "out".
+	 */
+	if ('-' != ep->name[2]) {
+		tmp = strrchr (ep->name, '-');
+		if (tmp) {
+			switch (type) {
+			case USB_ENDPOINT_XFER_INT:
+				/* bulk endpoints handle interrupt transfers,
+				 * except the toggle-quirky iso-synch kind
+				 */
+				if ('s' == tmp[2])	// == "-iso"
+					return 0;
+				/* for now, avoid PXA "interrupt-in";
+				 * it's documented as never using DATA1.
+				 */
+				if (gadget_is_pxa (gadget)
+						&& 'i' == tmp [1])
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_BULK:
+				if ('b' != tmp[1])	// != "-bulk"
+					return 0;
+				break;
+			case USB_ENDPOINT_XFER_ISOC:
+				if ('s' != tmp[2])	// != "-iso"
+					return 0;
+			}
+		} else {
+			tmp = ep->name + strlen (ep->name);
+		}
+
+		/* direction-restriction:  "..in-..", "out-.." */
+		tmp--;
+		if (!isdigit (*tmp)) {
+			if (desc->bEndpointAddress & USB_DIR_IN) {
+				if ('n' != *tmp)
+					return 0;
+			} else {
+				if ('t' != *tmp)
+					return 0;
+			}
+		}
+	}
+
+	/*
+	 * Get the number of required streams from the EP companion
+	 * descriptor and see if the EP matches it
+	 */
+	if (usb_endpoint_xfer_bulk(desc)) {
+		if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
+			num_req_streams = ep_comp->bmAttributes & 0x1f;
+			if (num_req_streams > ep->max_streams)
+				return 0;
+		}
+
+	}
+
+	/*
+	 * If the protocol driver hasn't yet decided on wMaxPacketSize
+	 * and wants to know the maximum possible, provide the info.
+	 */
+	if (desc->wMaxPacketSize == 0)
+		desc->wMaxPacketSize = cpu_to_le16(ep->maxpacket);
+
+	/* endpoint maxpacket size is an input parameter, except for bulk
+	 * where it's an output parameter representing the full speed limit.
+	 * the usb spec fixes high speed bulk maxpacket at 512 bytes.
+	 */
+	max = 0x7ff & usb_endpoint_maxp(desc);
+	switch (type) {
+	case USB_ENDPOINT_XFER_INT:
+		/* INT:  limit 64 bytes full speed, 1024 high/super speed */
+		if (!gadget_is_dualspeed(gadget) && max > 64)
+			return 0;
+		/* FALLTHROUGH */
+
+	case USB_ENDPOINT_XFER_ISOC:
+		/* ISO:  limit 1023 bytes full speed, 1024 high/super speed */
+		if (ep->maxpacket < max)
+			return 0;
+		if (!gadget_is_dualspeed(gadget) && max > 1023)
+			return 0;
+
+		/* BOTH:  "high bandwidth" works only at high speed */
+		if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
+			if (!gadget_is_dualspeed(gadget))
+				return 0;
+			/* configure your hardware with enough buffering!! */
+		}
+		break;
+	}
+
+	/* MATCH!! */
+
+	/* report address */
+	desc->bEndpointAddress &= USB_DIR_IN;
+	if (isdigit (ep->name [2])) {
+		u8	num = simple_strtoul (&ep->name [2], NULL, 10);
+		desc->bEndpointAddress |= num;
+#ifdef	MANY_ENDPOINTS
+	} else if (desc->bEndpointAddress & USB_DIR_IN) {
+		if (++in_epnum > 15)
+			return 0;
+		desc->bEndpointAddress = USB_DIR_IN | in_epnum;
+#endif
+	} else {
+		if (++epnum > 15)
+			return 0;
+		desc->bEndpointAddress |= epnum;
+	}
+
+	/* report (variable) full speed bulk maxpacket */
+	if ((USB_ENDPOINT_XFER_BULK == type) && !ep_comp) {
+		int size = ep->maxpacket;
+
+		/* min() doesn't work on bitfields with gcc-3.5 */
+		if (size > 64)
+			size = 64;
+		desc->wMaxPacketSize = cpu_to_le16(size);
+	}
+	ep->address = desc->bEndpointAddress;
+	return 1;
+}
+
+static struct usb_ep *
+find_ep (struct usb_gadget *gadget, const char *name)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (0 == strcmp (ep->name, name))
+			return ep;
+	}
+	return NULL;
+}
+
+/**
+ * usb_ep_autoconfig_ss() - choose an endpoint matching the ep
+ * descriptor and ep companion descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ *    initialized.  For periodic transfers, the maximum packet
+ *    size must also be initialized.  This is modified on
+ *    success.
+ * @ep_comp: Endpoint companion descriptor, with the required
+ *    number of streams. Will be modified when the chosen EP
+ *    supports a different number of streams.
+ *
+ * This routine replaces usb_ep_autoconfig() when superspeed
+ * enhancements are needed. If such enhancements are required,
+ * the function driver should call usb_ep_autoconfig_ss directly
+ * and provide the additional ep_comp parameter.
+ *
+ * By choosing an endpoint to use with the specified descriptor,
+ * this routine simplifies writing gadget drivers that work with
+ * multiple USB device controllers.  The endpoint would be
+ * passed later to usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration
+ * on your hardware.  This code may not make the best choices
+ * about how to use the USB controller, and it can't know all
+ * the restrictions that may apply. Some combinations of driver
+ * and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed and
+ * the bmAttributes field in the ep companion descriptor is
+ * updated with the assigned number of streams if it is
+ * different from the original value. To prevent the endpoint
+ * from being returned by a later autoconfig call, claim it by
+ * assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep *usb_ep_autoconfig_ss(
+	struct usb_gadget		*gadget,
+	struct usb_endpoint_descriptor	*desc,
+	struct usb_ss_ep_comp_descriptor *ep_comp
+)
+{
+	struct usb_ep	*ep;
+	u8		type;
+
+	type = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+
+	/* First, apply chip-specific "best usage" knowledge.
+	 * This might make a good usb_gadget_ops hook ...
+	 */
+	if (gadget_is_net2280 (gadget) && type == USB_ENDPOINT_XFER_INT) {
+		/* ep-e, ep-f are PIO with only 64 byte fifos */
+		ep = find_ep (gadget, "ep-e");
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+		ep = find_ep (gadget, "ep-f");
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+
+	} else if (gadget_is_goku (gadget)) {
+		if (USB_ENDPOINT_XFER_INT == type) {
+			/* single buffering is enough */
+			ep = find_ep(gadget, "ep3-bulk");
+			if (ep && ep_matches(gadget, ep, desc, ep_comp))
+				goto found_ep;
+		} else if (USB_ENDPOINT_XFER_BULK == type
+				&& (USB_DIR_IN & desc->bEndpointAddress)) {
+			/* DMA may be available */
+			ep = find_ep(gadget, "ep2-bulk");
+			if (ep && ep_matches(gadget, ep, desc,
+					      ep_comp))
+				goto found_ep;
+		}
+
+#ifdef CONFIG_BLACKFIN
+	} else if (gadget_is_musbhdrc(gadget)) {
+		if ((USB_ENDPOINT_XFER_BULK == type) ||
+		    (USB_ENDPOINT_XFER_ISOC == type)) {
+			if (USB_DIR_IN & desc->bEndpointAddress)
+				ep = find_ep (gadget, "ep5in");
+			else
+				ep = find_ep (gadget, "ep6out");
+		} else if (USB_ENDPOINT_XFER_INT == type) {
+			if (USB_DIR_IN & desc->bEndpointAddress)
+				ep = find_ep(gadget, "ep1in");
+			else
+				ep = find_ep(gadget, "ep2out");
+		} else
+			ep = NULL;
+		if (ep && ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+#endif
+	}
+
+	/* Second, look at endpoints until an unclaimed one looks usable */
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		if (ep_matches(gadget, ep, desc, ep_comp))
+			goto found_ep;
+	}
+
+	/* Fail */
+	return NULL;
+found_ep:
+	ep->desc = NULL;
+	ep->comp_desc = NULL;
+	return ep;
+}
+
+/**
+ * usb_ep_autoconfig() - choose an endpoint matching the
+ * descriptor
+ * @gadget: The device to which the endpoint must belong.
+ * @desc: Endpoint descriptor, with endpoint direction and transfer mode
+ *	initialized.  For periodic transfers, the maximum packet
+ *	size must also be initialized.  This is modified on success.
+ *
+ * By choosing an endpoint to use with the specified descriptor, this
+ * routine simplifies writing gadget drivers that work with multiple
+ * USB device controllers.  The endpoint would be passed later to
+ * usb_ep_enable(), along with some descriptor.
+ *
+ * That second descriptor won't always be the same as the first one.
+ * For example, isochronous endpoints can be autoconfigured for high
+ * bandwidth, and then used in several lower bandwidth altsettings.
+ * Also, high and full speed descriptors will be different.
+ *
+ * Be sure to examine and test the results of autoconfiguration on your
+ * hardware.  This code may not make the best choices about how to use the
+ * USB controller, and it can't know all the restrictions that may apply.
+ * Some combinations of driver and hardware won't be able to autoconfigure.
+ *
+ * On success, this returns an un-claimed usb_ep, and modifies the endpoint
+ * descriptor bEndpointAddress.  For bulk endpoints, the wMaxPacket value
+ * is initialized as if the endpoint were used at full speed.  To prevent
+ * the endpoint from being returned by a later autoconfig call, claim it
+ * by assigning ep->driver_data to some non-null value.
+ *
+ * On failure, this returns a null endpoint descriptor.
+ */
+struct usb_ep *usb_ep_autoconfig(
+	struct usb_gadget		*gadget,
+	struct usb_endpoint_descriptor	*desc
+)
+{
+	return usb_ep_autoconfig_ss(gadget, desc, NULL);
+}
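+
+/*
+ * Illustrative sketch (not part of the original sources) of the usage
+ * and claiming convention described above; my_fs_in_desc stands for a
+ * full speed bulk IN endpoint descriptor owned by the function driver.
+ * f_acm.c below uses the same pattern.
+ *
+ *	struct usb_ep *ep;
+ *
+ *	ep = usb_ep_autoconfig(cdev->gadget, &my_fs_in_desc);
+ *	if (!ep)
+ *		return -ENODEV;
+ *	ep->driver_data = cdev;		... claim the endpoint ...
+ */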
+
+
+/**
+ * usb_ep_autoconfig_reset - reset endpoint autoconfig state
+ * @gadget: device for which autoconfig state will be reset
+ *
+ * Use this for devices where one configuration may need to assign
+ * endpoint resources very differently from the next one.  It clears
+ * state such as ep->driver_data and the record of assigned endpoints
+ * used by usb_ep_autoconfig().
+ */
+void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
+{
+	struct usb_ep	*ep;
+
+	list_for_each_entry (ep, &gadget->ep_list, ep_list) {
+		ep->driver_data = NULL;
+	}
+#ifdef	MANY_ENDPOINTS
+	in_epnum = 0;
+#endif
+	epnum = 0;
+}
+
diff --git a/drivers/staging/ccg/f_acm.c b/drivers/staging/ccg/f_acm.c
new file mode 100644
index 0000000..d672250a
--- /dev/null
+++ b/drivers/staging/ccg/f_acm.c
@@ -0,0 +1,814 @@
+/*
+ * f_acm.c -- USB CDC serial (ACM) function driver
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers at steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ * Copyright (C) 2009 by Samsung Electronics
+ * Author: Michal Nazarewicz (mina86 at mina86.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "gadget_chips.h"
+
+
+/*
+ * This CDC ACM function support just wraps control functions and
+ * notifications around the generic serial-over-usb code.
+ *
+ * Because CDC ACM is standardized by the USB-IF, many host operating
+ * systems have drivers for it.  Accordingly, ACM is the preferred
+ * interop solution for serial-port type connections.  The control
+ * models are often not necessary, and in any case don't do much in
+ * this bare-bones implementation.
+ *
+ * Note that even MS-Windows has some support for ACM.  However, that
+ * support is somewhat broken because when you use ACM in a composite
+ * device, having multiple interfaces confuses the poor OS.  It doesn't
+ * seem to understand CDC Union descriptors.  The new "association"
+ * descriptors (roughly equivalent to CDC Unions) may sometimes help.
+ */
+
+struct f_acm {
+	struct gserial			port;
+	u8				ctrl_id, data_id;
+	u8				port_num;
+
+	u8				pending;
+
+	/* lock is mostly for pending and notify_req ... they get accessed
+	 * by callbacks both from tty (open/close/break) under its spinlock,
+	 * and notify_req.complete() which can't use that lock.
+	 */
+	spinlock_t			lock;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+
+	struct usb_cdc_line_coding	port_line_coding;	/* 8-N-1 etc */
+
+	/* SetControlLineState request -- CDC 1.1 section 6.2.14 (INPUT) */
+	u16				port_handshake_bits;
+#define ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
+#define ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
+
+	/* SerialState notification -- CDC 1.1 section 6.3.5 (OUTPUT) */
+	u16				serial_state;
+#define ACM_CTRL_OVERRUN	(1 << 6)
+#define ACM_CTRL_PARITY		(1 << 5)
+#define ACM_CTRL_FRAMING	(1 << 4)
+#define ACM_CTRL_RI		(1 << 3)
+#define ACM_CTRL_BRK		(1 << 2)
+#define ACM_CTRL_DSR		(1 << 1)
+#define ACM_CTRL_DCD		(1 << 0)
+};
+
+static inline struct f_acm *func_to_acm(struct usb_function *f)
+{
+	return container_of(f, struct f_acm, port.func);
+}
+
+static inline struct f_acm *port_to_acm(struct gserial *p)
+{
+	return container_of(p, struct f_acm, port);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* notification endpoint uses smallish and infrequent fixed-size messages */
+
+#define GS_LOG2_NOTIFY_INTERVAL		5	/* 1 << 5 == 32 msec */
+#define GS_NOTIFY_MAXPACKET		10	/* notification + 2 bytes */
+
+/* interface and class descriptors: */
+
+static struct usb_interface_assoc_descriptor
+acm_iad_descriptor = {
+	.bLength =		sizeof acm_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	/* .bFirstInterface =	DYNAMIC, */
+	.bInterfaceCount = 	2,	// control + data
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bFunctionProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iFunction =		DYNAMIC */
+};
+
+
+static struct usb_interface_descriptor acm_control_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =	USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =	USB_CDC_ACM_PROTO_AT_V25TER,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_interface_descriptor acm_data_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc acm_header_desc = {
+	.bLength =		sizeof(acm_header_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor
+acm_call_mgmt_descriptor = {
+	.bLength =		sizeof(acm_call_mgmt_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+	.bmCapabilities =	0,
+	/* .bDataInterface = DYNAMIC */
+};
+
+static struct usb_cdc_acm_descriptor acm_descriptor = {
+	.bLength =		sizeof(acm_descriptor),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+	.bmCapabilities =	USB_CDC_CAP_LINE,
+};
+
+static struct usb_cdc_union_desc acm_union_desc = {
+	.bLength =		sizeof(acm_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor acm_fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		1 << GS_LOG2_NOTIFY_INTERVAL,
+};
+
+static struct usb_endpoint_descriptor acm_fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acm_fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *acm_fs_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_fs_notify_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_fs_in_desc,
+	(struct usb_descriptor_header *) &acm_fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor acm_hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(GS_NOTIFY_MAXPACKET),
+	.bInterval =		GS_LOG2_NOTIFY_INTERVAL+4,
+};
+
+static struct usb_endpoint_descriptor acm_hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acm_hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *acm_hs_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_hs_notify_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_hs_in_desc,
+	(struct usb_descriptor_header *) &acm_hs_out_desc,
+	NULL,
+};
+
+static struct usb_endpoint_descriptor acm_ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acm_ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acm_ss_bulk_comp_desc = {
+	.bLength =              sizeof acm_ss_bulk_comp_desc,
+	.bDescriptorType =      USB_DT_SS_ENDPOINT_COMP,
+};
+
+static struct usb_descriptor_header *acm_ss_function[] = {
+	(struct usb_descriptor_header *) &acm_iad_descriptor,
+	(struct usb_descriptor_header *) &acm_control_interface_desc,
+	(struct usb_descriptor_header *) &acm_header_desc,
+	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &acm_descriptor,
+	(struct usb_descriptor_header *) &acm_union_desc,
+	(struct usb_descriptor_header *) &acm_hs_notify_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &acm_data_interface_desc,
+	(struct usb_descriptor_header *) &acm_ss_in_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &acm_ss_out_desc,
+	(struct usb_descriptor_header *) &acm_ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+#define ACM_CTRL_IDX	0
+#define ACM_DATA_IDX	1
+#define ACM_IAD_IDX	2
+
+/* static strings, in UTF-8 */
+static struct usb_string acm_string_defs[] = {
+	[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
+	[ACM_DATA_IDX].s = "CDC ACM Data",
+	[ACM_IAD_IDX].s = "CDC Serial",
+	{  /* ZEROES END LIST */ },
+};
+
+static struct usb_gadget_strings acm_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		acm_string_defs,
+};
+
+static struct usb_gadget_strings *acm_strings[] = {
+	&acm_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* ACM control ... data handling is delegated to tty library code.
+ * The main task of this function is to activate and deactivate
+ * that code based on device state; track parameters like line
+ * speed, handshake state, and so on; and issue notifications.
+ */
+
+static void acm_complete_set_line_coding(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct f_acm	*acm = ep->driver_data;
+	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+
+	if (req->status != 0) {
+		DBG(cdev, "acm ttyGS%d completion, err %d\n",
+				acm->port_num, req->status);
+		return;
+	}
+
+	/* normal completion */
+	if (req->actual != sizeof(acm->port_line_coding)) {
+		DBG(cdev, "acm ttyGS%d short resp, len %d\n",
+				acm->port_num, req->actual);
+		usb_ep_set_halt(ep);
+	} else {
+		struct usb_cdc_line_coding	*value = req->buf;
+
+		/* REVISIT:  we currently just remember this data.
+		 * If we change that, (a) validate it first, then
+		 * (b) update whatever hardware needs updating,
+		 * (c) worry about locking.  This is information on
+		 * the order of 9600-8-N-1 ... most of which means
+		 * nothing unless we control a real RS232 line.
+		 */
+		acm->port_line_coding = *value;
+	}
+}
+
+static int acm_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_acm		*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 *
+	 * Note CDC spec table 4 lists the ACM request profile.  It requires
+	 * encapsulated command support ... we don't handle any, and respond
+	 * to them by stalling.  Options include get/set/clear comm features
+	 * (not that useful) and SEND_BREAK.
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* SET_LINE_CODING ... just read and save what the host sends */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_LINE_CODING:
+		if (w_length != sizeof(struct usb_cdc_line_coding)
+				|| w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = w_length;
+		cdev->gadget->ep0->driver_data = acm;
+		req->complete = acm_complete_set_line_coding;
+		break;
+
+	/* GET_LINE_CODING ... return what host sent, or initial value */
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_GET_LINE_CODING:
+		if (w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = min_t(unsigned, w_length,
+				sizeof(struct usb_cdc_line_coding));
+		memcpy(req->buf, &acm->port_line_coding, value);
+		break;
+
+	/* SET_CONTROL_LINE_STATE ... save what the host sent */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_REQ_SET_CONTROL_LINE_STATE:
+		if (w_index != acm->ctrl_id)
+			goto invalid;
+
+		value = 0;
+
+		/* FIXME we should not allow data to flow until the
+		 * host sets the ACM_CTRL_DTR bit; and when it clears
+		 * that bit, we should return to that no-flow state.
+		 */
+		acm->port_handshake_bits = w_value;
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "acm ttyGS%d req%02x.%02x v%04x i%04x l%d\n",
+			acm->port_num, ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "acm response on ttyGS%d, err %d\n",
+					acm->port_num, value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_acm		*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt == 0, so this is an activation or a reset */
+
+	if (intf == acm->ctrl_id) {
+		if (acm->notify->driver_data) {
+			VDBG(cdev, "reset acm control interface %d\n", intf);
+			usb_ep_disable(acm->notify);
+		} else {
+			VDBG(cdev, "init acm ctrl interface %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, acm->notify))
+				return -EINVAL;
+		}
+		usb_ep_enable(acm->notify);
+		acm->notify->driver_data = acm;
+
+	} else if (intf == acm->data_id) {
+		if (acm->port.in->driver_data) {
+			DBG(cdev, "reset acm ttyGS%d\n", acm->port_num);
+			gserial_disconnect(&acm->port);
+		}
+		if (!acm->port.in->desc || !acm->port.out->desc) {
+			DBG(cdev, "activate acm ttyGS%d\n", acm->port_num);
+			if (config_ep_by_speed(cdev->gadget, f,
+					       acm->port.in) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       acm->port.out)) {
+				acm->port.in->desc = NULL;
+				acm->port.out->desc = NULL;
+				return -EINVAL;
+			}
+		}
+		gserial_connect(&acm->port, acm->port_num);
+
+	} else
+		return -EINVAL;
+
+	return 0;
+}
+
+static void acm_disable(struct usb_function *f)
+{
+	struct f_acm	*acm = func_to_acm(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	DBG(cdev, "acm ttyGS%d deactivated\n", acm->port_num);
+	gserial_disconnect(&acm->port);
+	usb_ep_disable(acm->notify);
+	acm->notify->driver_data = NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * acm_cdc_notify - issue CDC notification to host
+ * @acm: wraps host to be notified
+ * @type: notification type
+ * @value: Refer to cdc specs, wValue field.
+ * @data: data to be sent
+ * @length: size of data
+ * Context: irqs blocked, acm->lock held, acm_notify_req non-null
+ *
+ * Returns zero on success or a negative errno.
+ *
+ * See section 6.3.5 of the CDC 1.1 specification for information
+ * about the only notification we issue:  SerialState change.
+ */
+static int acm_cdc_notify(struct f_acm *acm, u8 type, u16 value,
+		void *data, unsigned length)
+{
+	struct usb_ep			*ep = acm->notify;
+	struct usb_request		*req;
+	struct usb_cdc_notification	*notify;
+	const unsigned			len = sizeof(*notify) + length;
+	void				*buf;
+	int				status;
+
+	req = acm->notify_req;
+	acm->notify_req = NULL;
+	acm->pending = false;
+
+	req->length = len;
+	notify = req->buf;
+	buf = notify + 1;
+
+	notify->bmRequestType = USB_DIR_IN | USB_TYPE_CLASS
+			| USB_RECIP_INTERFACE;
+	notify->bNotificationType = type;
+	notify->wValue = cpu_to_le16(value);
+	notify->wIndex = cpu_to_le16(acm->ctrl_id);
+	notify->wLength = cpu_to_le16(length);
+	memcpy(buf, data, length);
+
+	/* ep_queue() can complete immediately if it fills the fifo... */
+	spin_unlock(&acm->lock);
+	status = usb_ep_queue(ep, req, GFP_ATOMIC);
+	spin_lock(&acm->lock);
+
+	if (status < 0) {
+		ERROR(acm->port.func.config->cdev,
+				"acm ttyGS%d can't notify serial state, %d\n",
+				acm->port_num, status);
+		acm->notify_req = req;
+	}
+
+	return status;
+}
+
+static int acm_notify_serial_state(struct f_acm *acm)
+{
+	struct usb_composite_dev *cdev = acm->port.func.config->cdev;
+	int			status;
+
+	spin_lock(&acm->lock);
+	if (acm->notify_req) {
+		DBG(cdev, "acm ttyGS%d serial state %04x\n",
+				acm->port_num, acm->serial_state);
+		status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
+				0, &acm->serial_state, sizeof(acm->serial_state));
+	} else {
+		acm->pending = true;
+		status = 0;
+	}
+	spin_unlock(&acm->lock);
+	return status;
+}
+
+static void acm_cdc_notify_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_acm		*acm = req->context;
+	u8			doit = false;
+
+	/* on this call path we do NOT hold the port spinlock,
+	 * which is why ACM needs its own spinlock
+	 */
+	spin_lock(&acm->lock);
+	if (req->status != -ESHUTDOWN)
+		doit = acm->pending;
+	acm->notify_req = req;
+	spin_unlock(&acm->lock);
+
+	if (doit)
+		acm_notify_serial_state(acm);
+}
+
+/* connect == the TTY link is open */
+
+static void acm_connect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state |= ACM_CTRL_DSR | ACM_CTRL_DCD;
+	acm_notify_serial_state(acm);
+}
+
+static void acm_disconnect(struct gserial *port)
+{
+	struct f_acm		*acm = port_to_acm(port);
+
+	acm->serial_state &= ~(ACM_CTRL_DSR | ACM_CTRL_DCD);
+	acm_notify_serial_state(acm);
+}
+
+static int acm_send_break(struct gserial *port, int duration)
+{
+	struct f_acm		*acm = port_to_acm(port);
+	u16			state;
+
+	state = acm->serial_state;
+	state &= ~ACM_CTRL_BRK;
+	if (duration)
+		state |= ACM_CTRL_BRK;
+
+	acm->serial_state = state;
+	return acm_notify_serial_state(acm);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ACM function driver setup/binding */
+static int
+acm_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_acm		*acm = func_to_acm(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	acm->ctrl_id = status;
+	acm_iad_descriptor.bFirstInterface = status;
+
+	acm_control_interface_desc.bInterfaceNumber = status;
+	acm_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	acm->data_id = status;
+
+	acm_data_interface_desc.bInterfaceNumber = status;
+	acm_union_desc.bSlaveInterface0 = status;
+	acm_call_mgmt_descriptor.bDataInterface = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_in_desc);
+	if (!ep)
+		goto fail;
+	acm->port.in = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_out_desc);
+	if (!ep)
+		goto fail;
+	acm->port.out = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &acm_fs_notify_desc);
+	if (!ep)
+		goto fail;
+	acm->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* allocate notification */
+	acm->notify_req = gs_alloc_req(ep,
+			sizeof(struct usb_cdc_notification) + 2,
+			GFP_KERNEL);
+	if (!acm->notify_req)
+		goto fail;
+
+	acm->notify_req->complete = acm_cdc_notify_complete;
+	acm->notify_req->context = acm;
+
+	/* copy descriptors */
+	f->descriptors = usb_copy_descriptors(acm_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acm_hs_in_desc.bEndpointAddress =
+				acm_fs_in_desc.bEndpointAddress;
+		acm_hs_out_desc.bEndpointAddress =
+				acm_fs_out_desc.bEndpointAddress;
+		acm_hs_notify_desc.bEndpointAddress =
+				acm_fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors */
+		f->hs_descriptors = usb_copy_descriptors(acm_hs_function);
+	}
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		acm_ss_in_desc.bEndpointAddress =
+			acm_fs_in_desc.bEndpointAddress;
+		acm_ss_out_desc.bEndpointAddress =
+			acm_fs_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(acm_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	DBG(cdev, "acm ttyGS%d: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			acm->port_num,
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			acm->port.in->name, acm->port.out->name,
+			acm->notify->name);
+	return 0;
+
+fail:
+	if (acm->notify_req)
+		gs_free_req(acm->notify, acm->notify_req);
+
+	/* we might as well release our claims on endpoints */
+	if (acm->notify)
+		acm->notify->driver_data = NULL;
+	if (acm->port.out)
+		acm->port.out->driver_data = NULL;
+	if (acm->port.in)
+		acm->port.in->driver_data = NULL;
+
+	ERROR(cdev, "%s/%p: can't bind, err %d\n", f->name, f, status);
+
+	return status;
+}
+
+static void
+acm_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_acm		*acm = func_to_acm(f);
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	usb_free_descriptors(f->descriptors);
+	gs_free_req(acm->notify, acm->notify_req);
+	kfree(acm);
+}
+
+/* Some controllers can't support CDC ACM ... */
+static inline bool can_support_cdc(struct usb_configuration *c)
+{
+	/* everything else is *probably* fine ... */
+	return true;
+}
+
+/**
+ * acm_bind_config - add a CDC ACM function to a configuration
+ * @c: the configuration to support the CDC ACM instance
+ * @port_num: /dev/ttyGS* port this interface will use
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gserial_setup() with enough ports to
+ * handle all the ones it binds.  Caller is also responsible
+ * for calling @gserial_cleanup() before module unload.
+ */
+int acm_bind_config(struct usb_configuration *c, u8 port_num)
+{
+	struct f_acm	*acm;
+	int		status;
+
+	if (!can_support_cdc(c))
+		return -EINVAL;
+
+	/* REVISIT might want instance-specific strings to help
+	 * distinguish instances ...
+	 */
+
+	/* maybe allocate device-global string IDs, and patch descriptors */
+	if (acm_string_defs[ACM_CTRL_IDX].id == 0) {
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_CTRL_IDX].id = status;
+
+		acm_control_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_DATA_IDX].id = status;
+
+		acm_data_interface_desc.iInterface = status;
+
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		acm_string_defs[ACM_IAD_IDX].id = status;
+
+		acm_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	acm = kzalloc(sizeof *acm, GFP_KERNEL);
+	if (!acm)
+		return -ENOMEM;
+
+	spin_lock_init(&acm->lock);
+
+	acm->port_num = port_num;
+
+	acm->port.connect = acm_connect;
+	acm->port.disconnect = acm_disconnect;
+	acm->port.send_break = acm_send_break;
+
+	acm->port.func.name = "acm";
+	acm->port.func.strings = acm_strings;
+	/* descriptors are per-instance copies */
+	acm->port.func.bind = acm_bind;
+	acm->port.func.unbind = acm_unbind;
+	acm->port.func.set_alt = acm_set_alt;
+	acm->port.func.setup = acm_setup;
+	acm->port.func.disable = acm_disable;
+
+	status = usb_add_function(c, &acm->port.func);
+	if (status)
+		kfree(acm);
+	return status;
+}
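+
+/*
+ * Example (illustrative sketch only, not used by this driver): a composite
+ * gadget's configuration bind callback would typically set up the TTY
+ * ports once and then add one ACM instance per port, roughly like this.
+ * The function name and port count below are made up for the example.
+ *
+ *	static int my_acm_config_bind(struct usb_configuration *c)
+ *	{
+ *		int status;
+ *
+ *		status = gserial_setup(c->cdev->gadget, 1);
+ *		if (status < 0)
+ *			return status;
+ *
+ *		status = acm_bind_config(c, 0);
+ *		if (status < 0)
+ *			gserial_cleanup();
+ *		return status;
+ *	}
+ */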
diff --git a/drivers/staging/ccg/f_fs.c b/drivers/staging/ccg/f_fs.c
new file mode 100644
index 0000000..8adc79d
--- /dev/null
+++ b/drivers/staging/ccg/f_fs.c
@@ -0,0 +1,2455 @@
+/*
+ * f_fs.c -- user mode file system API for USB composite function controllers
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ * Author: Michal Nazarewicz <mina86 at mina86.com>
+ *
+ * Based on inode.c (GadgetFS) which was:
+ * Copyright (C) 2003-2004 David Brownell
+ * Copyright (C) 2003 Agilent Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/blkdev.h>
+#include <linux/pagemap.h>
+#include <linux/export.h>
+#include <linux/hid.h>
+#include <asm/unaligned.h>
+
+#include <linux/usb/composite.h>
+#include <linux/usb/functionfs.h>
+
+
+#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by an honest dice roll ;) */
+
+
+/* Debugging ****************************************************************/
+
+#ifdef VERBOSE_DEBUG
+#  define pr_vdebug pr_debug
+#  define ffs_dump_mem(prefix, ptr, len) \
+	print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len)
+#else
+#  define pr_vdebug(...)                 do { } while (0)
+#  define ffs_dump_mem(prefix, ptr, len) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ENTER()    pr_vdebug("%s()\n", __func__)
+
+
+/* The data structure and setup file ****************************************/
+
+enum ffs_state {
+	/*
+	 * Waiting for descriptors and strings.
+	 *
+	 * In this state no open(2), read(2) or write(2) on epfiles
+	 * may succeed (which should not be the problem as there
+	 * should be no such files opened in the first place).
+	 */
+	FFS_READ_DESCRIPTORS,
+	FFS_READ_STRINGS,
+
+	/*
+	 * We've got descriptors and strings.  We are about to call,
+	 * or have already called, functionfs_ready_callback().
+	 * functionfs_bind() may have been called but we don't know.
+	 *
+	 * This is the only state in which operations on epfiles may
+	 * succeed.
+	 */
+	FFS_ACTIVE,
+
+	/*
+	 * All endpoints have been closed.  This state is also set if
+	 * we encounter an unrecoverable error.  The only
+	 * unrecoverable error is when, after reading strings from
+	 * user space, we fail to initialise epfiles or
+	 * functionfs_ready_callback() returns an error (<0).
+	 *
+	 * In this state no open(2), read(2) or write(2) (both on ep0
+	 * as well as epfile) may succeed (at this point epfiles are
+	 * unlinked and all closed so this is not a problem; ep0 is
+	 * also closed but ep0 file exists and so open(2) on ep0 must
+	 * fail).
+	 */
+	FFS_CLOSING
+};
+
+
+enum ffs_setup_state {
+	/* There is no setup request pending. */
+	FFS_NO_SETUP,
+	/*
+	 * User has read events and there was a setup request event
+	 * there.  The next read/write on ep0 will handle the
+	 * request.
+	 */
+	FFS_SETUP_PENDING,
+	/*
+	 * There was an event pending, but before user space handled
+	 * it another event was added which cancelled the existing
+	 * setup.  If this state is set, read/write on ep0 returns
+	 * -EIDRM.  This state is only set when adding an event.
+	 */
+	FFS_SETUP_CANCELED
+};
+
+
+
+struct ffs_epfile;
+struct ffs_function;
+
+struct ffs_data {
+	struct usb_gadget		*gadget;
+
+	/*
+	 * Protects read/write operations; only one read/write is
+	 * allowed at a time.  As a consequence it also protects
+	 * ep0req and company.  This is held while a setup request is
+	 * being processed (queued).
+	 */
+	struct mutex			mutex;
+
+	/*
+	 * Protect access to endpoint related structures (basically
+	 * usb_ep_queue(), usb_ep_dequeue(), etc. calls) except for
+	 * endpoint zero.
+	 */
+	spinlock_t			eps_lock;
+
+	/*
+	 * XXX REVISIT do we need our own request?  Since we are not
+	 * handling setup requests immediately, user space may be so
+	 * slow that another setup is sent to the gadget, but this
+	 * time not to us but to another function, and then there
+	 * could be a race.  Is that the case?  Or maybe we can use
+	 * cdev->req after all; maybe we just need some spinlock for
+	 * that?
+	 */
+	struct usb_request		*ep0req;		/* P: mutex */
+	struct completion		ep0req_completion;	/* P: mutex */
+	int				ep0req_status;		/* P: mutex */
+
+	/* reference counter */
+	atomic_t			ref;
+	/* how many files are opened (EP0 and others) */
+	atomic_t			opened;
+
+	/* EP0 state */
+	enum ffs_state			state;
+
+	/*
+	 * Possible transitions:
+	 * + FFS_NO_SETUP       -> FFS_SETUP_PENDING  -- P: ev.waitq.lock
+	 *               happens only in ep0 read which is P: mutex
+	 * + FFS_SETUP_PENDING  -> FFS_NO_SETUP       -- P: ev.waitq.lock
+	 *               happens only in ep0 i/o  which is P: mutex
+	 * + FFS_SETUP_PENDING  -> FFS_SETUP_CANCELED -- P: ev.waitq.lock
+	 * + FFS_SETUP_CANCELED -> FFS_NO_SETUP       -- cmpxchg
+	 */
+	enum ffs_setup_state		setup_state;
+
+#define FFS_SETUP_STATE(ffs)					\
+	((enum ffs_setup_state)cmpxchg(&(ffs)->setup_state,	\
+				       FFS_SETUP_CANCELED, FFS_NO_SETUP))
+
+	/* Events & such. */
+	struct {
+		u8				types[4];
+		unsigned short			count;
+		/* XXX REVISIT need to update it in some places, or do we? */
+		unsigned short			can_stall;
+		struct usb_ctrlrequest		setup;
+
+		wait_queue_head_t		waitq;
+	} ev; /* the whole structure, P: ev.waitq.lock */
+
+	/* Flags */
+	unsigned long			flags;
+#define FFS_FL_CALL_CLOSED_CALLBACK 0
+#define FFS_FL_BOUND                1
+
+	/* Active function */
+	struct ffs_function		*func;
+
+	/*
+	 * Device name, write once when file system is mounted.
+	 * Intended for user to read if she wants.
+	 */
+	const char			*dev_name;
+	/* Private data for our user (ie. gadget).  Managed by user. */
+	void				*private_data;
+
+	/* filled by __ffs_data_got_descs() */
+	/*
+	 * Real descriptors are 16 bytes after raw_descs (so you need
+	 * to skip 16 bytes (ie. ffs->raw_descs + 16) to get to the
+	 * first full speed descriptor).  raw_descs_length and
+	 * raw_fs_descs_length do not have those 16 bytes added.
+	 */
+	const void			*raw_descs;
+	unsigned			raw_descs_length;
+	unsigned			raw_fs_descs_length;
+	unsigned			fs_descs_count;
+	unsigned			hs_descs_count;
+
+	unsigned short			strings_count;
+	unsigned short			interfaces_count;
+	unsigned short			eps_count;
+	unsigned short			_pad1;
+
+	/* filled by __ffs_data_got_strings() */
+	/* ids in stringtabs are set in functionfs_bind() */
+	const void			*raw_strings;
+	struct usb_gadget_strings	**stringtabs;
+
+	/*
+	 * File system's super block, write once when file system is
+	 * mounted.
+	 */
+	struct super_block		*sb;
+
+	/* File permissions, written once when fs is mounted */
+	struct ffs_file_perms {
+		umode_t				mode;
+		uid_t				uid;
+		gid_t				gid;
+	}				file_perms;
+
+	/*
+	 * The endpoint files, filled by ffs_epfiles_create(),
+	 * destroyed by ffs_epfiles_destroy().
+	 */
+	struct ffs_epfile		*epfiles;
+};
+
+/* Reference counter handling */
+static void ffs_data_get(struct ffs_data *ffs);
+static void ffs_data_put(struct ffs_data *ffs);
+/* Creates new ffs_data object. */
+static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc));
+
+/* Opened counter handling. */
+static void ffs_data_opened(struct ffs_data *ffs);
+static void ffs_data_closed(struct ffs_data *ffs);
+
+/* Called with ffs->mutex held; take over ownership of data. */
+static int __must_check
+__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
+static int __must_check
+__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);
+
+
+/* The function structure ***************************************************/
+
+struct ffs_ep;
+
+struct ffs_function {
+	struct usb_configuration	*conf;
+	struct usb_gadget		*gadget;
+	struct ffs_data			*ffs;
+
+	struct ffs_ep			*eps;
+	u8				eps_revmap[16];
+	short				*interfaces_nums;
+
+	struct usb_function		function;
+};
+
+
+static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
+{
+	return container_of(f, struct ffs_function, function);
+}
+
+static void ffs_func_free(struct ffs_function *func);
+
+static void ffs_func_eps_disable(struct ffs_function *func);
+static int __must_check ffs_func_eps_enable(struct ffs_function *func);
+
+static int ffs_func_bind(struct usb_configuration *,
+			 struct usb_function *);
+static void ffs_func_unbind(struct usb_configuration *,
+			    struct usb_function *);
+static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
+static void ffs_func_disable(struct usb_function *);
+static int ffs_func_setup(struct usb_function *,
+			  const struct usb_ctrlrequest *);
+static void ffs_func_suspend(struct usb_function *);
+static void ffs_func_resume(struct usb_function *);
+
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);
+
+
+/* The endpoints structures *************************************************/
+
+struct ffs_ep {
+	struct usb_ep			*ep;	/* P: ffs->eps_lock */
+	struct usb_request		*req;	/* P: epfile->mutex */
+
+	/* [0]: full speed, [1]: high speed */
+	struct usb_endpoint_descriptor	*descs[2];
+
+	u8				num;
+
+	int				status;	/* P: epfile->mutex */
+};
+
+struct ffs_epfile {
+	/* Protects ep->ep and ep->req. */
+	struct mutex			mutex;
+	wait_queue_head_t		wait;
+
+	struct ffs_data			*ffs;
+	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
+
+	struct dentry			*dentry;
+
+	char				name[5];
+
+	unsigned char			in;	/* P: ffs->eps_lock */
+	unsigned char			isoc;	/* P: ffs->eps_lock */
+
+	unsigned char			_pad;
+};
+
+static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);
+
+static struct inode *__must_check
+ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
+		   const struct file_operations *fops,
+		   struct dentry **dentry_p);
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+	__attribute__((warn_unused_result, nonnull));
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+	__attribute__((warn_unused_result, nonnull));
+
+
+/* Control file aka ep0 *****************************************************/
+
+static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct ffs_data *ffs = req->context;
+
+	complete_all(&ffs->ep0req_completion);
+}
+
+static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
+{
+	struct usb_request *req = ffs->ep0req;
+	int ret;
+
+	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+
+	req->buf      = data;
+	req->length   = len;
+
+	/*
+	 * The UDC layer requires a buffer to be provided even for a ZLP,
+	 * but should not use it at all.  Provide a poisoned pointer to
+	 * catch possible bugs in the driver.
+	 */
+	if (req->buf == NULL)
+		req->buf = (void *)0xDEADBABE;
+
+	INIT_COMPLETION(ffs->ep0req_completion);
+
+	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
+	if (unlikely(ret < 0))
+		return ret;
+
+	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
+	if (unlikely(ret)) {
+		usb_ep_dequeue(ffs->gadget->ep0, req);
+		return -EINTR;
+	}
+
+	ffs->setup_state = FFS_NO_SETUP;
+	return ffs->ep0req_status;
+}
+
+static int __ffs_ep0_stall(struct ffs_data *ffs)
+{
+	if (ffs->ev.can_stall) {
+		pr_vdebug("ep0 stall\n");
+		usb_ep_set_halt(ffs->gadget->ep0);
+		ffs->setup_state = FFS_NO_SETUP;
+		return -EL2HLT;
+	} else {
+		pr_debug("bogus ep0 stall!\n");
+		return -ESRCH;
+	}
+}
+
+static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
+			     size_t len, loff_t *ptr)
+{
+	struct ffs_data *ffs = file->private_data;
+	ssize_t ret;
+	char *data;
+
+	ENTER();
+
+	/* Fast check if setup was canceled */
+	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+		return -EIDRM;
+
+	/* Acquire mutex */
+	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Check state */
+	switch (ffs->state) {
+	case FFS_READ_DESCRIPTORS:
+	case FFS_READ_STRINGS:
+		/* Copy data */
+		if (unlikely(len < 16)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		data = ffs_prepare_buffer(buf, len);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			break;
+		}
+
+		/* Handle data */
+		if (ffs->state == FFS_READ_DESCRIPTORS) {
+			pr_info("read descriptors\n");
+			ret = __ffs_data_got_descs(ffs, data, len);
+			if (unlikely(ret < 0))
+				break;
+
+			ffs->state = FFS_READ_STRINGS;
+			ret = len;
+		} else {
+			pr_info("read strings\n");
+			ret = __ffs_data_got_strings(ffs, data, len);
+			if (unlikely(ret < 0))
+				break;
+
+			ret = ffs_epfiles_create(ffs);
+			if (unlikely(ret)) {
+				ffs->state = FFS_CLOSING;
+				break;
+			}
+
+			ffs->state = FFS_ACTIVE;
+			mutex_unlock(&ffs->mutex);
+
+			ret = functionfs_ready_callback(ffs);
+			if (unlikely(ret < 0)) {
+				ffs->state = FFS_CLOSING;
+				return ret;
+			}
+
+			set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
+			return len;
+		}
+		break;
+
+	case FFS_ACTIVE:
+		data = NULL;
+		/*
+		 * We're called from user space, we can use _irq
+		 * rather than _irqsave
+		 */
+		spin_lock_irq(&ffs->ev.waitq.lock);
+		switch (FFS_SETUP_STATE(ffs)) {
+		case FFS_SETUP_CANCELED:
+			ret = -EIDRM;
+			goto done_spin;
+
+		case FFS_NO_SETUP:
+			ret = -ESRCH;
+			goto done_spin;
+
+		case FFS_SETUP_PENDING:
+			break;
+		}
+
+		/* FFS_SETUP_PENDING */
+		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+			ret = __ffs_ep0_stall(ffs);
+			break;
+		}
+
+		/* FFS_SETUP_PENDING and not stall */
+		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+		spin_unlock_irq(&ffs->ev.waitq.lock);
+
+		data = ffs_prepare_buffer(buf, len);
+		if (IS_ERR(data)) {
+			ret = PTR_ERR(data);
+			break;
+		}
+
+		spin_lock_irq(&ffs->ev.waitq.lock);
+
+		/*
+		 * We are guaranteed to be still in FFS_ACTIVE state
+		 * but the state of setup could have changed from
+		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELED so we need
+		 * to check for that.  If that happened we copied data
+		 * from user space in vain but it's unlikely.
+		 *
+		 * For sure we are not in FFS_NO_SETUP since this is
+		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
+		 * transition can be performed and it's protected by
+		 * mutex.
+		 */
+		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+			ret = -EIDRM;
+done_spin:
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+		} else {
+			/* unlocks spinlock */
+			ret = __ffs_ep0_queue_wait(ffs, data, len);
+		}
+		kfree(data);
+		break;
+
+	default:
+		ret = -EBADFD;
+		break;
+	}
+
+	mutex_unlock(&ffs->mutex);
+	return ret;
+}
+
+static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
+				     size_t n)
+{
+	/*
+	 * We are holding ffs->ev.waitq.lock and ffs->mutex and we need
+	 * to release them.
+	 */
+	struct usb_functionfs_event events[n];
+	unsigned i = 0;
+
+	memset(events, 0, sizeof events);
+
+	do {
+		events[i].type = ffs->ev.types[i];
+		if (events[i].type == FUNCTIONFS_SETUP) {
+			events[i].u.setup = ffs->ev.setup;
+			ffs->setup_state = FFS_SETUP_PENDING;
+		}
+	} while (++i < n);
+
+	if (n < ffs->ev.count) {
+		ffs->ev.count -= n;
+		memmove(ffs->ev.types, ffs->ev.types + n,
+			ffs->ev.count * sizeof *ffs->ev.types);
+	} else {
+		ffs->ev.count = 0;
+	}
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+	mutex_unlock(&ffs->mutex);
+
+	return unlikely(__copy_to_user(buf, events, sizeof events))
+		? -EFAULT : sizeof events;
+}
+
+static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
+			    size_t len, loff_t *ptr)
+{
+	struct ffs_data *ffs = file->private_data;
+	char *data = NULL;
+	size_t n;
+	int ret;
+
+	ENTER();
+
+	/* Fast check if setup was canceled */
+	if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED)
+		return -EIDRM;
+
+	/* Acquire mutex */
+	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
+	if (unlikely(ret < 0))
+		return ret;
+
+	/* Check state */
+	if (ffs->state != FFS_ACTIVE) {
+		ret = -EBADFD;
+		goto done_mutex;
+	}
+
+	/*
+	 * We're called from user space, we can use _irq rather than
+	 * _irqsave
+	 */
+	spin_lock_irq(&ffs->ev.waitq.lock);
+
+	switch (FFS_SETUP_STATE(ffs)) {
+	case FFS_SETUP_CANCELED:
+		ret = -EIDRM;
+		break;
+
+	case FFS_NO_SETUP:
+		n = len / sizeof(struct usb_functionfs_event);
+		if (unlikely(!n)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
+			ret = -EAGAIN;
+			break;
+		}
+
+		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
+							ffs->ev.count)) {
+			ret = -EINTR;
+			break;
+		}
+
+		return __ffs_ep0_read_events(ffs, buf,
+					     min(n, (size_t)ffs->ev.count));
+
+	case FFS_SETUP_PENDING:
+		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
+			spin_unlock_irq(&ffs->ev.waitq.lock);
+			ret = __ffs_ep0_stall(ffs);
+			goto done_mutex;
+		}
+
+		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));
+
+		spin_unlock_irq(&ffs->ev.waitq.lock);
+
+		if (likely(len)) {
+			data = kmalloc(len, GFP_KERNEL);
+			if (unlikely(!data)) {
+				ret = -ENOMEM;
+				goto done_mutex;
+			}
+		}
+
+		spin_lock_irq(&ffs->ev.waitq.lock);
+
+		/* See ffs_ep0_write() */
+		if (FFS_SETUP_STATE(ffs) == FFS_SETUP_CANCELED) {
+			ret = -EIDRM;
+			break;
+		}
+
+		/* unlocks spinlock */
+		ret = __ffs_ep0_queue_wait(ffs, data, len);
+		if (likely(ret > 0) && unlikely(__copy_to_user(buf, data, len)))
+			ret = -EFAULT;
+		goto done_mutex;
+
+	default:
+		ret = -EBADFD;
+		break;
+	}
+
+	spin_unlock_irq(&ffs->ev.waitq.lock);
+done_mutex:
+	mutex_unlock(&ffs->mutex);
+	kfree(data);
+	return ret;
+}
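+
+/*
+ * For illustration only (user-space side, not part of this driver): once
+ * the descriptors and strings have been written, the same ep0 descriptor
+ * is used to read struct usb_functionfs_event records, e.g.:
+ *
+ *	#include <unistd.h>
+ *	#include <linux/usb/functionfs.h>
+ *
+ *	struct usb_functionfs_event event;
+ *
+ *	while (read(ep0_fd, &event, sizeof(event)) == sizeof(event)) {
+ *		switch (event.type) {
+ *		case FUNCTIONFS_ENABLE:
+ *			// start I/O on the ep1, ep2, ... files
+ *			break;
+ *		case FUNCTIONFS_SETUP:
+ *			// answer event.u.setup with read()/write() on ep0
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *	}
+ *
+ * ep0_fd above is a made-up name for the file descriptor obtained by
+ * opening the "ep0" file of a mounted functionfs instance.
+ */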
+
+static int ffs_ep0_open(struct inode *inode, struct file *file)
+{
+	struct ffs_data *ffs = inode->i_private;
+
+	ENTER();
+
+	if (unlikely(ffs->state == FFS_CLOSING))
+		return -EBUSY;
+
+	file->private_data = ffs;
+	ffs_data_opened(ffs);
+
+	return 0;
+}
+
+static int ffs_ep0_release(struct inode *inode, struct file *file)
+{
+	struct ffs_data *ffs = file->private_data;
+
+	ENTER();
+
+	ffs_data_closed(ffs);
+
+	return 0;
+}
+
+static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
+{
+	struct ffs_data *ffs = file->private_data;
+	struct usb_gadget *gadget = ffs->gadget;
+	long ret;
+
+	ENTER();
+
+	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
+		struct ffs_function *func = ffs->func;
+		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
+	} else if (gadget && gadget->ops->ioctl) {
+		ret = gadget->ops->ioctl(gadget, code, value);
+	} else {
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static const struct file_operations ffs_ep0_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ffs_ep0_open,
+	.write =	ffs_ep0_write,
+	.read =		ffs_ep0_read,
+	.release =	ffs_ep0_release,
+	.unlocked_ioctl =	ffs_ep0_ioctl,
+};
+
+
+/* "Normal" endpoints operations ********************************************/
+
+static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+	ENTER();
+	if (likely(req->context)) {
+		struct ffs_ep *ep = _ep->driver_data;
+		ep->status = req->status ? req->status : req->actual;
+		complete(req->context);
+	}
+}
+
+static ssize_t ffs_epfile_io(struct file *file,
+			     char __user *buf, size_t len, int read)
+{
+	struct ffs_epfile *epfile = file->private_data;
+	struct ffs_ep *ep;
+	char *data = NULL;
+	ssize_t ret;
+	int halt;
+
+	goto first_try;
+	do {
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		mutex_unlock(&epfile->mutex);
+
+first_try:
+		/* Are we still active? */
+		if (WARN_ON(epfile->ffs->state != FFS_ACTIVE)) {
+			ret = -ENODEV;
+			goto error;
+		}
+
+		/* Wait for endpoint to be enabled */
+		ep = epfile->ep;
+		if (!ep) {
+			if (file->f_flags & O_NONBLOCK) {
+				ret = -EAGAIN;
+				goto error;
+			}
+
+			if (wait_event_interruptible(epfile->wait,
+						     (ep = epfile->ep))) {
+				ret = -EINTR;
+				goto error;
+			}
+		}
+
+		/* Do we halt? */
+		halt = !read == !epfile->in;
+		if (halt && epfile->isoc) {
+			ret = -EINVAL;
+			goto error;
+		}
+
+		/* Allocate & copy */
+		if (!halt && !data) {
+			data = kzalloc(len, GFP_KERNEL);
+			if (unlikely(!data))
+				return -ENOMEM;
+
+			if (!read &&
+			    unlikely(__copy_from_user(data, buf, len))) {
+				ret = -EFAULT;
+				goto error;
+			}
+		}
+
+		/* We will be using request */
+		ret = ffs_mutex_lock(&epfile->mutex,
+				     file->f_flags & O_NONBLOCK);
+		if (unlikely(ret))
+			goto error;
+
+		/*
+		 * We're called from user space, we can use _irq rather than
+		 * _irqsave
+		 */
+		spin_lock_irq(&epfile->ffs->eps_lock);
+
+		/*
+		 * Has the endpoint been disabled or changed while we
+		 * were acquiring the mutex?
+		 */
+	} while (unlikely(epfile->ep != ep));
+
+	/* Halt */
+	if (unlikely(halt)) {
+		if (likely(epfile->ep == ep) && !WARN_ON(!ep->ep))
+			usb_ep_set_halt(ep->ep);
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+		ret = -EBADMSG;
+	} else {
+		/* Fire the request */
+		DECLARE_COMPLETION_ONSTACK(done);
+
+		struct usb_request *req = ep->req;
+		req->context  = &done;
+		req->complete = ffs_epfile_io_complete;
+		req->buf      = data;
+		req->length   = len;
+
+		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
+
+		spin_unlock_irq(&epfile->ffs->eps_lock);
+
+		if (unlikely(ret < 0)) {
+			/* nop */
+		} else if (unlikely(wait_for_completion_interruptible(&done))) {
+			ret = -EINTR;
+			usb_ep_dequeue(ep->ep, req);
+		} else {
+			ret = ep->status;
+			if (read && ret > 0 &&
+			    unlikely(copy_to_user(buf, data, ret)))
+				ret = -EFAULT;
+		}
+	}
+
+	mutex_unlock(&epfile->mutex);
+error:
+	kfree(data);
+	return ret;
+}
+
+static ssize_t
+ffs_epfile_write(struct file *file, const char __user *buf, size_t len,
+		 loff_t *ptr)
+{
+	ENTER();
+
+	return ffs_epfile_io(file, (char __user *)buf, len, 0);
+}
+
+static ssize_t
+ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr)
+{
+	ENTER();
+
+	return ffs_epfile_io(file, buf, len, 1);
+}
+
+static int
+ffs_epfile_open(struct inode *inode, struct file *file)
+{
+	struct ffs_epfile *epfile = inode->i_private;
+
+	ENTER();
+
+	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+		return -ENODEV;
+
+	file->private_data = epfile;
+	ffs_data_opened(epfile->ffs);
+
+	return 0;
+}
+
+static int
+ffs_epfile_release(struct inode *inode, struct file *file)
+{
+	struct ffs_epfile *epfile = inode->i_private;
+
+	ENTER();
+
+	ffs_data_closed(epfile->ffs);
+
+	return 0;
+}
+
+static long ffs_epfile_ioctl(struct file *file, unsigned code,
+			     unsigned long value)
+{
+	struct ffs_epfile *epfile = file->private_data;
+	int ret;
+
+	ENTER();
+
+	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+		return -ENODEV;
+
+	spin_lock_irq(&epfile->ffs->eps_lock);
+	if (likely(epfile->ep)) {
+		switch (code) {
+		case FUNCTIONFS_FIFO_STATUS:
+			ret = usb_ep_fifo_status(epfile->ep->ep);
+			break;
+		case FUNCTIONFS_FIFO_FLUSH:
+			usb_ep_fifo_flush(epfile->ep->ep);
+			ret = 0;
+			break;
+		case FUNCTIONFS_CLEAR_HALT:
+			ret = usb_ep_clear_halt(epfile->ep->ep);
+			break;
+		case FUNCTIONFS_ENDPOINT_REVMAP:
+			ret = epfile->ep->num;
+			break;
+		default:
+			ret = -ENOTTY;
+		}
+	} else {
+		ret = -ENODEV;
+	}
+	spin_unlock_irq(&epfile->ffs->eps_lock);
+
+	return ret;
+}
+
+static const struct file_operations ffs_epfile_operations = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+
+	.open =		ffs_epfile_open,
+	.write =	ffs_epfile_write,
+	.read =		ffs_epfile_read,
+	.release =	ffs_epfile_release,
+	.unlocked_ioctl =	ffs_epfile_ioctl,
+};
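+
+/*
+ * For illustration only: from user space the epN files are driven with
+ * plain read()/write().  A minimal sketch for an IN endpoint (ep1_fd is
+ * a made-up name for an already opened ep1 file):
+ *
+ *	char buf[64] = "hello";
+ *
+ *	if (write(ep1_fd, buf, sizeof(buf)) < 0)
+ *		perror("ep1 write");
+ *
+ * The write blocks until the host collects the data (or fails with
+ * EAGAIN when the file was opened O_NONBLOCK and the endpoint is not
+ * enabled yet, as implemented in ffs_epfile_io() above).
+ */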
+
+
+/* File system and super block operations ***********************************/
+
+/*
+ * Mounting the file system creates a controller file, used first for
+ * function configuration then later for event monitoring.
+ */
+
+static struct inode *__must_check
+ffs_sb_make_inode(struct super_block *sb, void *data,
+		  const struct file_operations *fops,
+		  const struct inode_operations *iops,
+		  struct ffs_file_perms *perms)
+{
+	struct inode *inode;
+
+	ENTER();
+
+	inode = new_inode(sb);
+
+	if (likely(inode)) {
+		struct timespec current_time = CURRENT_TIME;
+
+		inode->i_ino	 = get_next_ino();
+		inode->i_mode    = perms->mode;
+		inode->i_uid     = perms->uid;
+		inode->i_gid     = perms->gid;
+		inode->i_atime   = current_time;
+		inode->i_mtime   = current_time;
+		inode->i_ctime   = current_time;
+		inode->i_private = data;
+		if (fops)
+			inode->i_fop = fops;
+		if (iops)
+			inode->i_op  = iops;
+	}
+
+	return inode;
+}
+
+/* Create "regular" file */
+static struct inode *ffs_sb_create_file(struct super_block *sb,
+					const char *name, void *data,
+					const struct file_operations *fops,
+					struct dentry **dentry_p)
+{
+	struct ffs_data	*ffs = sb->s_fs_info;
+	struct dentry	*dentry;
+	struct inode	*inode;
+
+	ENTER();
+
+	dentry = d_alloc_name(sb->s_root, name);
+	if (unlikely(!dentry))
+		return NULL;
+
+	inode = ffs_sb_make_inode(sb, data, fops, NULL, &ffs->file_perms);
+	if (unlikely(!inode)) {
+		dput(dentry);
+		return NULL;
+	}
+
+	d_add(dentry, inode);
+	if (dentry_p)
+		*dentry_p = dentry;
+
+	return inode;
+}
+
+/* Super block */
+static const struct super_operations ffs_sb_operations = {
+	.statfs =	simple_statfs,
+	.drop_inode =	generic_delete_inode,
+};
+
+struct ffs_sb_fill_data {
+	struct ffs_file_perms perms;
+	umode_t root_mode;
+	const char *dev_name;
+	union {
+		/* set by ffs_fs_mount(), read by ffs_sb_fill() */
+		void *private_data;
+		/* set by ffs_sb_fill(), read by ffs_fs_mount */
+		struct ffs_data *ffs_data;
+	};
+};
+
+static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
+{
+	struct ffs_sb_fill_data *data = _data;
+	struct inode	*inode;
+	struct ffs_data	*ffs;
+
+	ENTER();
+
+	/* Initialise data */
+	ffs = ffs_data_new();
+	if (unlikely(!ffs))
+		goto Enomem;
+
+	ffs->sb              = sb;
+	ffs->dev_name        = kstrdup(data->dev_name, GFP_KERNEL);
+	if (unlikely(!ffs->dev_name))
+		goto Enomem;
+	ffs->file_perms      = data->perms;
+	ffs->private_data    = data->private_data;
+
+	/* used by the caller of this function */
+	data->ffs_data       = ffs;
+
+	sb->s_fs_info        = ffs;
+	sb->s_blocksize      = PAGE_CACHE_SIZE;
+	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+	sb->s_magic          = FUNCTIONFS_MAGIC;
+	sb->s_op             = &ffs_sb_operations;
+	sb->s_time_gran      = 1;
+
+	/* Root inode */
+	data->perms.mode = data->root_mode;
+	inode = ffs_sb_make_inode(sb, NULL,
+				  &simple_dir_operations,
+				  &simple_dir_inode_operations,
+				  &data->perms);
+	sb->s_root = d_make_root(inode);
+	if (unlikely(!sb->s_root))
+		goto Enomem;
+
+	/* EP0 file */
+	if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
+					 &ffs_ep0_operations, NULL)))
+		goto Enomem;
+
+	return 0;
+
+Enomem:
+	return -ENOMEM;
+}
+
+static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
+{
+	ENTER();
+
+	if (!opts || !*opts)
+		return 0;
+
+	for (;;) {
+		char *end, *eq, *comma;
+		unsigned long value;
+
+		/* Option limit */
+		comma = strchr(opts, ',');
+		if (comma)
+			*comma = 0;
+
+		/* Value limit */
+		eq = strchr(opts, '=');
+		if (unlikely(!eq)) {
+			pr_err("'=' missing in %s\n", opts);
+			return -EINVAL;
+		}
+		*eq = 0;
+
+		/* Parse value */
+		value = simple_strtoul(eq + 1, &end, 0);
+		if (unlikely(*end != ',' && *end != 0)) {
+			pr_err("%s: invalid value: %s\n", opts, eq + 1);
+			return -EINVAL;
+		}
+
+		/* Interpret option */
+		switch (eq - opts) {
+		case 5:
+			if (!memcmp(opts, "rmode", 5))
+				data->root_mode  = (value & 0555) | S_IFDIR;
+			else if (!memcmp(opts, "fmode", 5))
+				data->perms.mode = (value & 0666) | S_IFREG;
+			else
+				goto invalid;
+			break;
+
+		case 4:
+			if (!memcmp(opts, "mode", 4)) {
+				data->root_mode  = (value & 0555) | S_IFDIR;
+				data->perms.mode = (value & 0666) | S_IFREG;
+			} else {
+				goto invalid;
+			}
+			break;
+
+		case 3:
+			if (!memcmp(opts, "uid", 3))
+				data->perms.uid = value;
+			else if (!memcmp(opts, "gid", 3))
+				data->perms.gid = value;
+			else
+				goto invalid;
+			break;
+
+		default:
+invalid:
+			pr_err("%s: invalid option\n", opts);
+			return -EINVAL;
+		}
+
+		/* Next iteration */
+		if (!comma)
+			break;
+		opts = comma + 1;
+	}
+
+	return 0;
+}
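+
+/*
+ * For illustration, the options parsed above correspond to a user-space
+ * mount along these lines (source and target names are arbitrary):
+ *
+ *	#include <sys/mount.h>
+ *
+ *	mount("my_ffs", "/dev/ffs", "functionfs", 0,
+ *	      "uid=1000,gid=1000,rmode=0550,fmode=0660");
+ *
+ * which creates the root directory with mode 0550 and the ep0 file (and,
+ * later, the other ep files) owned by uid/gid 1000 with mode 0660.
+ */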
+
+/* "mount -t functionfs dev_name /dev/function" ends up here */
+
+static struct dentry *
+ffs_fs_mount(struct file_system_type *t, int flags,
+	      const char *dev_name, void *opts)
+{
+	struct ffs_sb_fill_data data = {
+		.perms = {
+			.mode = S_IFREG | 0600,
+			.uid = 0,
+			.gid = 0
+		},
+		.root_mode = S_IFDIR | 0500,
+	};
+	struct dentry *rv;
+	int ret;
+	void *ffs_dev;
+
+	ENTER();
+
+	ret = ffs_fs_parse_opts(&data, opts);
+	if (unlikely(ret < 0))
+		return ERR_PTR(ret);
+
+	ffs_dev = functionfs_acquire_dev_callback(dev_name);
+	if (IS_ERR(ffs_dev))
+		return ffs_dev;
+
+	data.dev_name = dev_name;
+	data.private_data = ffs_dev;
+	rv = mount_nodev(t, flags, &data, ffs_sb_fill);
+
+	/* data.ffs_data is set by ffs_sb_fill */
+	if (IS_ERR(rv))
+		functionfs_release_dev_callback(data.ffs_data);
+
+	return rv;
+}
+
+static void
+ffs_fs_kill_sb(struct super_block *sb)
+{
+	ENTER();
+
+	kill_litter_super(sb);
+	if (sb->s_fs_info) {
+		functionfs_release_dev_callback(sb->s_fs_info);
+		ffs_data_put(sb->s_fs_info);
+	}
+}
+
+static struct file_system_type ffs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "functionfs",
+	.mount		= ffs_fs_mount,
+	.kill_sb	= ffs_fs_kill_sb,
+};
+
+
+/* Driver's main init/cleanup functions *************************************/
+
+static int functionfs_init(void)
+{
+	int ret;
+
+	ENTER();
+
+	ret = register_filesystem(&ffs_fs_type);
+	if (likely(!ret))
+		pr_info("file system registered\n");
+	else
+		pr_err("failed registering file system (%d)\n", ret);
+
+	return ret;
+}
+
+static void functionfs_cleanup(void)
+{
+	ENTER();
+
+	pr_info("unloading\n");
+	unregister_filesystem(&ffs_fs_type);
+}
+
+
+/* ffs_data and ffs_function construction and destruction code **************/
+
+static void ffs_data_clear(struct ffs_data *ffs);
+static void ffs_data_reset(struct ffs_data *ffs);
+
+static void ffs_data_get(struct ffs_data *ffs)
+{
+	ENTER();
+
+	atomic_inc(&ffs->ref);
+}
+
+static void ffs_data_opened(struct ffs_data *ffs)
+{
+	ENTER();
+
+	atomic_inc(&ffs->ref);
+	atomic_inc(&ffs->opened);
+}
+
+static void ffs_data_put(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
+		pr_info("%s(): freeing\n", __func__);
+		ffs_data_clear(ffs);
+		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
+		       waitqueue_active(&ffs->ep0req_completion.wait));
+		kfree(ffs->dev_name);
+		kfree(ffs);
+	}
+}
+
+static void ffs_data_closed(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (atomic_dec_and_test(&ffs->opened)) {
+		ffs->state = FFS_CLOSING;
+		ffs_data_reset(ffs);
+	}
+
+	ffs_data_put(ffs);
+}
+
+static struct ffs_data *ffs_data_new(void)
+{
+	struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
+	if (unlikely(!ffs))
+		return NULL;
+
+	ENTER();
+
+	atomic_set(&ffs->ref, 1);
+	atomic_set(&ffs->opened, 0);
+	ffs->state = FFS_READ_DESCRIPTORS;
+	mutex_init(&ffs->mutex);
+	spin_lock_init(&ffs->eps_lock);
+	init_waitqueue_head(&ffs->ev.waitq);
+	init_completion(&ffs->ep0req_completion);
+
+	/* XXX REVISIT need to update it in some places, or do we? */
+	ffs->ev.can_stall = 1;
+
+	return ffs;
+}
+
+static void ffs_data_clear(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
+		functionfs_closed_callback(ffs);
+
+	BUG_ON(ffs->gadget);
+
+	if (ffs->epfiles)
+		ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+
+	kfree(ffs->raw_descs);
+	kfree(ffs->raw_strings);
+	kfree(ffs->stringtabs);
+}
+
+static void ffs_data_reset(struct ffs_data *ffs)
+{
+	ENTER();
+
+	ffs_data_clear(ffs);
+
+	ffs->epfiles = NULL;
+	ffs->raw_descs = NULL;
+	ffs->raw_strings = NULL;
+	ffs->stringtabs = NULL;
+
+	ffs->raw_descs_length = 0;
+	ffs->raw_fs_descs_length = 0;
+	ffs->fs_descs_count = 0;
+	ffs->hs_descs_count = 0;
+
+	ffs->strings_count = 0;
+	ffs->interfaces_count = 0;
+	ffs->eps_count = 0;
+
+	ffs->ev.count = 0;
+
+	ffs->state = FFS_READ_DESCRIPTORS;
+	ffs->setup_state = FFS_NO_SETUP;
+	ffs->flags = 0;
+}
+
+
+static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
+{
+	struct usb_gadget_strings **lang;
+	int first_id;
+
+	ENTER();
+
+	if (WARN_ON(ffs->state != FFS_ACTIVE
+		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
+		return -EBADFD;
+
+	first_id = usb_string_ids_n(cdev, ffs->strings_count);
+	if (unlikely(first_id < 0))
+		return first_id;
+
+	ffs->ep0req = usb_ep_alloc_request(cdev->gadget->ep0, GFP_KERNEL);
+	if (unlikely(!ffs->ep0req))
+		return -ENOMEM;
+	ffs->ep0req->complete = ffs_ep0_complete;
+	ffs->ep0req->context = ffs;
+
+	for (lang = ffs->stringtabs; *lang; ++lang) {
+		struct usb_string *str = (*lang)->strings;
+		int id = first_id;
+		for (; str->s; ++id, ++str)
+			str->id = id;
+	}
+
+	ffs->gadget = cdev->gadget;
+	ffs_data_get(ffs);
+	return 0;
+}
+
+static void functionfs_unbind(struct ffs_data *ffs)
+{
+	ENTER();
+
+	if (!WARN_ON(!ffs->gadget)) {
+		usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
+		ffs->ep0req = NULL;
+		ffs->gadget = NULL;
+		ffs_data_put(ffs);
+		clear_bit(FFS_FL_BOUND, &ffs->flags);
+	}
+}
+
+static int ffs_epfiles_create(struct ffs_data *ffs)
+{
+	struct ffs_epfile *epfile, *epfiles;
+	unsigned i, count;
+
+	ENTER();
+
+	count = ffs->eps_count;
+	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
+	if (!epfiles)
+		return -ENOMEM;
+
+	epfile = epfiles;
+	for (i = 1; i <= count; ++i, ++epfile) {
+		epfile->ffs = ffs;
+		mutex_init(&epfile->mutex);
+		init_waitqueue_head(&epfile->wait);
+		sprintf(epfile->name, "ep%u", i);
+		if (unlikely(!ffs_sb_create_file(ffs->sb, epfile->name, epfile,
+						 &ffs_epfile_operations,
+						 &epfile->dentry))) {
+			ffs_epfiles_destroy(epfiles, i - 1);
+			return -ENOMEM;
+		}
+	}
+
+	ffs->epfiles = epfiles;
+	return 0;
+}
+
+static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
+{
+	struct ffs_epfile *epfile = epfiles;
+
+	ENTER();
+
+	for (; count; --count, ++epfile) {
+		BUG_ON(mutex_is_locked(&epfile->mutex) ||
+		       waitqueue_active(&epfile->wait));
+		if (epfile->dentry) {
+			d_delete(epfile->dentry);
+			dput(epfile->dentry);
+			epfile->dentry = NULL;
+		}
+	}
+
+	kfree(epfiles);
+}
+
+static int functionfs_bind_config(struct usb_composite_dev *cdev,
+				  struct usb_configuration *c,
+				  struct ffs_data *ffs)
+{
+	struct ffs_function *func;
+	int ret;
+
+	ENTER();
+
+	func = kzalloc(sizeof *func, GFP_KERNEL);
+	if (unlikely(!func))
+		return -ENOMEM;
+
+	func->function.name    = "Function FS Gadget";
+	func->function.strings = ffs->stringtabs;
+
+	func->function.bind    = ffs_func_bind;
+	func->function.unbind  = ffs_func_unbind;
+	func->function.set_alt = ffs_func_set_alt;
+	func->function.disable = ffs_func_disable;
+	func->function.setup   = ffs_func_setup;
+	func->function.suspend = ffs_func_suspend;
+	func->function.resume  = ffs_func_resume;
+
+	func->conf   = c;
+	func->gadget = cdev->gadget;
+	func->ffs = ffs;
+	ffs_data_get(ffs);
+
+	ret = usb_add_function(c, &func->function);
+	if (unlikely(ret))
+		ffs_func_free(func);
+
+	return ret;
+}
+
+static void ffs_func_free(struct ffs_function *func)
+{
+	struct ffs_ep *ep         = func->eps;
+	unsigned count            = func->ffs->eps_count;
+	unsigned long flags;
+
+	ENTER();
+
+	/* cleanup after autoconfig */
+	spin_lock_irqsave(&func->ffs->eps_lock, flags);
+	do {
+		if (ep->ep && ep->req)
+			usb_ep_free_request(ep->ep, ep->req);
+		ep->req = NULL;
+		++ep;
+	} while (--count);
+	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+	ffs_data_put(func->ffs);
+
+	kfree(func->eps);
+	/*
+	 * eps and interfaces_nums are allocated in the same chunk so
+	 * only one free is required.  Descriptors are also allocated
+	 * in the same chunk.
+	 */
+
+	kfree(func);
+}
+
+static void ffs_func_eps_disable(struct ffs_function *func)
+{
+	struct ffs_ep *ep         = func->eps;
+	struct ffs_epfile *epfile = func->ffs->epfiles;
+	unsigned count            = func->ffs->eps_count;
+	unsigned long flags;
+
+	spin_lock_irqsave(&func->ffs->eps_lock, flags);
+	do {
+		/* pending requests get nuked */
+		if (likely(ep->ep))
+			usb_ep_disable(ep->ep);
+		epfile->ep = NULL;
+
+		++ep;
+		++epfile;
+	} while (--count);
+	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+}
+
+static int ffs_func_eps_enable(struct ffs_function *func)
+{
+	struct ffs_data *ffs      = func->ffs;
+	struct ffs_ep *ep         = func->eps;
+	struct ffs_epfile *epfile = ffs->epfiles;
+	unsigned count            = ffs->eps_count;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&func->ffs->eps_lock, flags);
+	do {
+		struct usb_endpoint_descriptor *ds;
+		ds = ep->descs[ep->descs[1] ? 1 : 0];
+
+		ep->ep->driver_data = ep;
+		ep->ep->desc = ds;
+		ret = usb_ep_enable(ep->ep);
+		if (likely(!ret)) {
+			epfile->ep = ep;
+			epfile->in = usb_endpoint_dir_in(ds);
+			epfile->isoc = usb_endpoint_xfer_isoc(ds);
+		} else {
+			break;
+		}
+
+		wake_up(&epfile->wait);
+
+		++ep;
+		++epfile;
+	} while (--count);
+	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+	return ret;
+}
+
+
+/* Parsing and building descriptors and strings *****************************/
+
+/*
+ * This validates whether the data pointed to by data is a valid USB
+ * descriptor and records how many interfaces, endpoints and strings
+ * are required by the given configuration.  Returns the number of
+ * bytes consumed by the descriptor, or a negative errno if the data
+ * is invalid.
+ */
+
+enum ffs_entity_type {
+	FFS_DESCRIPTOR, FFS_INTERFACE, FFS_STRING, FFS_ENDPOINT
+};
+
+typedef int (*ffs_entity_callback)(enum ffs_entity_type entity,
+				   u8 *valuep,
+				   struct usb_descriptor_header *desc,
+				   void *priv);
+
+static int __must_check ffs_do_desc(char *data, unsigned len,
+				    ffs_entity_callback entity, void *priv)
+{
+	struct usb_descriptor_header *_ds = (void *)data;
+	u8 length;
+	int ret;
+
+	ENTER();
+
+	/* At least two bytes are required: length and type */
+	if (len < 2) {
+		pr_vdebug("descriptor too short\n");
+		return -EINVAL;
+	}
+
+	/* Do we have at least as many bytes as the descriptor takes? */
+	length = _ds->bLength;
+	if (len < length) {
+		pr_vdebug("descriptor longer than available data\n");
+		return -EINVAL;
+	}
+
+#define __entity_check_INTERFACE(val)  1
+#define __entity_check_STRING(val)     (val)
+#define __entity_check_ENDPOINT(val)   ((val) & USB_ENDPOINT_NUMBER_MASK)
+#define __entity(type, val) do {					\
+		pr_vdebug("entity " #type "(%02x)\n", (val));		\
+		if (unlikely(!__entity_check_ ##type(val))) {		\
+			pr_vdebug("invalid entity's value\n");		\
+			return -EINVAL;					\
+		}							\
+		ret = entity(FFS_ ##type, &val, _ds, priv);		\
+		if (unlikely(ret < 0)) {				\
+			pr_debug("entity " #type "(%02x); ret = %d\n",	\
+				 (val), ret);				\
+			return ret;					\
+		}							\
+	} while (0)
+
+	/* Parse descriptor depending on type. */
+	switch (_ds->bDescriptorType) {
+	case USB_DT_DEVICE:
+	case USB_DT_CONFIG:
+	case USB_DT_STRING:
+	case USB_DT_DEVICE_QUALIFIER:
+		/* function can't have any of those */
+		pr_vdebug("descriptor reserved for gadget: %d\n",
+		      _ds->bDescriptorType);
+		return -EINVAL;
+
+	case USB_DT_INTERFACE: {
+		struct usb_interface_descriptor *ds = (void *)_ds;
+		pr_vdebug("interface descriptor\n");
+		if (length != sizeof *ds)
+			goto inv_length;
+
+		__entity(INTERFACE, ds->bInterfaceNumber);
+		if (ds->iInterface)
+			__entity(STRING, ds->iInterface);
+	}
+		break;
+
+	case USB_DT_ENDPOINT: {
+		struct usb_endpoint_descriptor *ds = (void *)_ds;
+		pr_vdebug("endpoint descriptor\n");
+		if (length != USB_DT_ENDPOINT_SIZE &&
+		    length != USB_DT_ENDPOINT_AUDIO_SIZE)
+			goto inv_length;
+		__entity(ENDPOINT, ds->bEndpointAddress);
+	}
+		break;
+
+	case HID_DT_HID:
+		pr_vdebug("hid descriptor\n");
+		if (length != sizeof(struct hid_descriptor))
+			goto inv_length;
+		break;
+
+	case USB_DT_OTG:
+		if (length != sizeof(struct usb_otg_descriptor))
+			goto inv_length;
+		break;
+
+	case USB_DT_INTERFACE_ASSOCIATION: {
+		struct usb_interface_assoc_descriptor *ds = (void *)_ds;
+		pr_vdebug("interface association descriptor\n");
+		if (length != sizeof *ds)
+			goto inv_length;
+		if (ds->iFunction)
+			__entity(STRING, ds->iFunction);
+	}
+		break;
+
+	case USB_DT_OTHER_SPEED_CONFIG:
+	case USB_DT_INTERFACE_POWER:
+	case USB_DT_DEBUG:
+	case USB_DT_SECURITY:
+	case USB_DT_CS_RADIO_CONTROL:
+		/* TODO */
+		pr_vdebug("unimplemented descriptor: %d\n", _ds->bDescriptorType);
+		return -EINVAL;
+
+	default:
+		/* We should never be here */
+		pr_vdebug("unknown descriptor: %d\n", _ds->bDescriptorType);
+		return -EINVAL;
+
+inv_length:
+		pr_vdebug("invalid length: %d (descriptor %d)\n",
+			  _ds->bLength, _ds->bDescriptorType);
+		return -EINVAL;
+	}
+
+#undef __entity
+#undef __entity_check_DESCRIPTOR
+#undef __entity_check_INTERFACE
+#undef __entity_check_STRING
+#undef __entity_check_ENDPOINT
+
+	return length;
+}
+
+static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+				     ffs_entity_callback entity, void *priv)
+{
+	const unsigned _len = len;
+	unsigned long num = 0;
+
+	ENTER();
+
+	for (;;) {
+		int ret;
+
+		if (num == count)
+			data = NULL;
+
+		/* Record "descriptor" entity */
+		ret = entity(FFS_DESCRIPTOR, (u8 *)num, (void *)data, priv);
+		if (unlikely(ret < 0)) {
+			pr_debug("entity DESCRIPTOR(%02lx); ret = %d\n",
+				 num, ret);
+			return ret;
+		}
+
+		if (!data)
+			return _len - len;
+
+		ret = ffs_do_desc(data, len, entity, priv);
+		if (unlikely(ret < 0)) {
+			pr_debug("%s returns %d\n", __func__, ret);
+			return ret;
+		}
+
+		len -= ret;
+		data += ret;
+		++num;
+	}
+}
+
+static int __ffs_data_do_entity(enum ffs_entity_type type,
+				u8 *valuep, struct usb_descriptor_header *desc,
+				void *priv)
+{
+	struct ffs_data *ffs = priv;
+
+	ENTER();
+
+	switch (type) {
+	case FFS_DESCRIPTOR:
+		break;
+
+	case FFS_INTERFACE:
+		/*
+		 * Interfaces are indexed from zero so if we
+		 * encountered interface "n" then there are at least
+		 * "n+1" interfaces.
+		 */
+		if (*valuep >= ffs->interfaces_count)
+			ffs->interfaces_count = *valuep + 1;
+		break;
+
+	case FFS_STRING:
+		/*
+		 * Strings are indexed from 1 (0 is magic ;) reserved
+		 * for languages list or some such)
+		 */
+		if (*valuep > ffs->strings_count)
+			ffs->strings_count = *valuep;
+		break;
+
+	case FFS_ENDPOINT:
+		/* Endpoints are indexed from 1 as well. */
+		if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count)
+			ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK);
+		break;
+	}
+
+	return 0;
+}
+
+static int __ffs_data_got_descs(struct ffs_data *ffs,
+				char *const _data, size_t len)
+{
+	unsigned fs_count, hs_count;
+	int fs_len, ret = -EINVAL;
+	char *data = _data;
+
+	ENTER();
+
+	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_DESCRIPTORS_MAGIC ||
+		     get_unaligned_le32(data + 4) != len))
+		goto error;
+	fs_count = get_unaligned_le32(data +  8);
+	hs_count = get_unaligned_le32(data + 12);
+
+	if (!fs_count && !hs_count)
+		goto einval;
+
+	data += 16;
+	len  -= 16;
+
+	if (likely(fs_count)) {
+		fs_len = ffs_do_descs(fs_count, data, len,
+				      __ffs_data_do_entity, ffs);
+		if (unlikely(fs_len < 0)) {
+			ret = fs_len;
+			goto error;
+		}
+
+		data += fs_len;
+		len  -= fs_len;
+	} else {
+		fs_len = 0;
+	}
+
+	if (likely(hs_count)) {
+		ret = ffs_do_descs(hs_count, data, len,
+				   __ffs_data_do_entity, ffs);
+		if (unlikely(ret < 0))
+			goto error;
+	} else {
+		ret = 0;
+	}
+
+	if (unlikely(len != ret))
+		goto einval;
+
+	ffs->raw_fs_descs_length = fs_len;
+	ffs->raw_descs_length    = fs_len + ret;
+	ffs->raw_descs           = _data;
+	ffs->fs_descs_count      = fs_count;
+	ffs->hs_descs_count      = hs_count;
+
+	return 0;
+
+einval:
+	ret = -EINVAL;
+error:
+	kfree(_data);
+	return ret;
+}
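+
+/*
+ * For illustration, the blob accepted above starts with the 16-byte
+ * header checked at the top of __ffs_data_got_descs().  A user-space
+ * writer would lay it out roughly as follows (total_length, fs_count and
+ * hs_count are placeholders; htole32() is the user-space counterpart of
+ * cpu_to_le32(), from <endian.h>):
+ *
+ *	#include <endian.h>
+ *	#include <linux/usb/functionfs.h>
+ *
+ *	struct usb_functionfs_descs_head header = {
+ *		.magic    = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ *		.length   = htole32(total_length),
+ *		.fs_count = htole32(fs_count),
+ *		.hs_count = htole32(hs_count),
+ *	};
+ *
+ * followed immediately by fs_count full-speed descriptors and then
+ * hs_count high-speed descriptors, all written to ep0 in one write().
+ */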
+
+static int __ffs_data_got_strings(struct ffs_data *ffs,
+				  char *const _data, size_t len)
+{
+	u32 str_count, needed_count, lang_count;
+	struct usb_gadget_strings **stringtabs, *t;
+	struct usb_string *strings, *s;
+	const char *data = _data;
+
+	ENTER();
+
+	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+		     get_unaligned_le32(data + 4) != len))
+		goto error;
+	str_count  = get_unaligned_le32(data + 8);
+	lang_count = get_unaligned_le32(data + 12);
+
+	/* if one is zero the other must be zero */
+	if (unlikely(!str_count != !lang_count))
+		goto error;
+
+	/* Do we have at least as many strings as descriptors need? */
+	needed_count = ffs->strings_count;
+	if (unlikely(str_count < needed_count))
+		goto error;
+
+	/*
+	 * If we don't need any strings just return and free all
+	 * memory.
+	 */
+	if (!needed_count) {
+		kfree(_data);
+		return 0;
+	}
+
+	/* Allocate everything in one chunk so there's less maintenance. */
+	{
+		struct {
+			struct usb_gadget_strings *stringtabs[lang_count + 1];
+			struct usb_gadget_strings stringtab[lang_count];
+			struct usb_string strings[lang_count*(needed_count+1)];
+		} *d;
+		unsigned i = 0;
+
+		d = kmalloc(sizeof *d, GFP_KERNEL);
+		if (unlikely(!d)) {
+			kfree(_data);
+			return -ENOMEM;
+		}
+
+		stringtabs = d->stringtabs;
+		t = d->stringtab;
+		i = lang_count;
+		do {
+			*stringtabs++ = t++;
+		} while (--i);
+		*stringtabs = NULL;
+
+		stringtabs = d->stringtabs;
+		t = d->stringtab;
+		s = d->strings;
+		strings = s;
+	}
+
+	/* For each language */
+	data += 16;
+	len -= 16;
+
+	do { /* lang_count > 0 so we can use do-while */
+		unsigned needed = needed_count;
+
+		if (unlikely(len < 3))
+			goto error_free;
+		t->language = get_unaligned_le16(data);
+		t->strings  = s;
+		++t;
+
+		data += 2;
+		len -= 2;
+
+		/* For each string */
+		do { /* str_count > 0 so we can use do-while */
+			size_t length = strnlen(data, len);
+
+			if (unlikely(length == len))
+				goto error_free;
+
+			/*
+			 * The user may provide more strings than we
+			 * need; if that's the case we simply ignore
+			 * the rest.
+			 */
+			if (likely(needed)) {
+				/*
+				 * s->id will be set while adding
+				 * function to configuration so for
+				 * now just leave garbage here.
+				 */
+				s->s = data;
+				--needed;
+				++s;
+			}
+
+			data += length + 1;
+			len -= length + 1;
+		} while (--str_count);
+
+		s->id = 0;   /* terminator */
+		s->s = NULL;
+		++s;
+
+	} while (--lang_count);
+
+	/* Some garbage left? */
+	if (unlikely(len))
+		goto error_free;
+
+	/* Done! */
+	ffs->stringtabs = stringtabs;
+	ffs->raw_strings = _data;
+
+	return 0;
+
+error_free:
+	kfree(stringtabs);
+error:
+	kfree(_data);
+	return -EINVAL;
+}
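+
+/*
+ * For illustration, the strings blob accepted above has the mirror-image
+ * layout: a struct usb_functionfs_strings_head (magic, length, str_count,
+ * lang_count) followed, for each language, by a little-endian 16-bit
+ * language code and str_count NUL-terminated strings.  A minimal
+ * single-language, single-string blob could be built in user space as:
+ *
+ *	#include <endian.h>
+ *	#include <linux/usb/functionfs.h>
+ *
+ *	static const struct {
+ *		struct usb_functionfs_strings_head header;
+ *		__le16 code;
+ *		char str[sizeof("FunctionFS")];
+ *	} __attribute__((packed)) strings = {
+ *		.header = {
+ *			.magic      = htole32(FUNCTIONFS_STRINGS_MAGIC),
+ *			.length     = htole32(sizeof(strings)),
+ *			.str_count  = htole32(1),
+ *			.lang_count = htole32(1),
+ *		},
+ *		.code = htole16(0x0409),
+ *		.str  = "FunctionFS",
+ *	};
+ *
+ * where 0x0409 is the en-US language code; the string itself is an
+ * arbitrary example.
+ */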
+
+
+/* Events handling and management *******************************************/
+
+static void __ffs_event_add(struct ffs_data *ffs,
+			    enum usb_functionfs_event_type type)
+{
+	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
+	int neg = 0;
+
+	/*
+	 * Abort any unhandled setup
+	 *
+	 * We do not need to worry about some cmpxchg() changing value
+	 * of ffs->setup_state without holding the lock because when
+	 * state is FFS_SETUP_PENDING cmpxchg() in several places in
+	 * the source does nothing.
+	 */
+	if (ffs->setup_state == FFS_SETUP_PENDING)
+		ffs->setup_state = FFS_SETUP_CANCELED;
+
+	switch (type) {
+	case FUNCTIONFS_RESUME:
+		rem_type2 = FUNCTIONFS_SUSPEND;
+		/* FALL THROUGH */
+	case FUNCTIONFS_SUSPEND:
+	case FUNCTIONFS_SETUP:
+		rem_type1 = type;
+		/* Discard all similar events */
+		break;
+
+	case FUNCTIONFS_BIND:
+	case FUNCTIONFS_UNBIND:
+	case FUNCTIONFS_DISABLE:
+	case FUNCTIONFS_ENABLE:
+		/* Discard everything other than power management. */
+		rem_type1 = FUNCTIONFS_SUSPEND;
+		rem_type2 = FUNCTIONFS_RESUME;
+		neg = 1;
+		break;
+
+	default:
+		BUG();
+	}
+
+	{
+		u8 *ev  = ffs->ev.types, *out = ev;
+		unsigned n = ffs->ev.count;
+		for (; n; --n, ++ev)
+			if ((*ev == rem_type1 || *ev == rem_type2) == neg)
+				*out++ = *ev;
+			else
+				pr_vdebug("purging event %d\n", *ev);
+		ffs->ev.count = out - ffs->ev.types;
+	}
+
+	pr_vdebug("adding event %d\n", type);
+	ffs->ev.types[ffs->ev.count++] = type;
+	wake_up_locked(&ffs->ev.waitq);
+}
+
+static void ffs_event_add(struct ffs_data *ffs,
+			  enum usb_functionfs_event_type type)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+	__ffs_event_add(ffs, type);
+	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+}
+
+
+/* Bind/unbind USB function hooks *******************************************/
+
+static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
+				    struct usb_descriptor_header *desc,
+				    void *priv)
+{
+	struct usb_endpoint_descriptor *ds = (void *)desc;
+	struct ffs_function *func = priv;
+	struct ffs_ep *ffs_ep;
+
+	/*
+	 * If hs_descriptors is not NULL then we are reading hs
+	 * descriptors now
+	 */
+	const int isHS = func->function.hs_descriptors != NULL;
+	unsigned idx;
+
+	if (type != FFS_DESCRIPTOR)
+		return 0;
+
+	if (isHS)
+		func->function.hs_descriptors[(long)valuep] = desc;
+	else
+		func->function.descriptors[(long)valuep]    = desc;
+
+	if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+		return 0;
+
+	idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1;
+	ffs_ep = func->eps + idx;
+
+	if (unlikely(ffs_ep->descs[isHS])) {
+		pr_vdebug("two %sspeed descriptors for EP %d\n",
+			  isHS ? "high" : "full",
+			  ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+		return -EINVAL;
+	}
+	ffs_ep->descs[isHS] = ds;
+
+	ffs_dump_mem(": Original  ep desc", ds, ds->bLength);
+	if (ffs_ep->ep) {
+		ds->bEndpointAddress = ffs_ep->descs[0]->bEndpointAddress;
+		if (!ds->wMaxPacketSize)
+			ds->wMaxPacketSize = ffs_ep->descs[0]->wMaxPacketSize;
+	} else {
+		struct usb_request *req;
+		struct usb_ep *ep;
+
+		pr_vdebug("autoconfig\n");
+		ep = usb_ep_autoconfig(func->gadget, ds);
+		if (unlikely(!ep))
+			return -ENOTSUPP;
+		ep->driver_data = func->eps + idx;
+
+		req = usb_ep_alloc_request(ep, GFP_KERNEL);
+		if (unlikely(!req))
+			return -ENOMEM;
+
+		ffs_ep->ep  = ep;
+		ffs_ep->req = req;
+		func->eps_revmap[ds->bEndpointAddress &
+				 USB_ENDPOINT_NUMBER_MASK] = idx + 1;
+	}
+	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
+
+	return 0;
+}
+
+static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
+				   struct usb_descriptor_header *desc,
+				   void *priv)
+{
+	struct ffs_function *func = priv;
+	unsigned idx;
+	u8 newValue;
+
+	switch (type) {
+	default:
+	case FFS_DESCRIPTOR:
+		/* Handled in previous pass by __ffs_func_bind_do_descs() */
+		return 0;
+
+	case FFS_INTERFACE:
+		idx = *valuep;
+		if (func->interfaces_nums[idx] < 0) {
+			int id = usb_interface_id(func->conf, &func->function);
+			if (unlikely(id < 0))
+				return id;
+			func->interfaces_nums[idx] = id;
+		}
+		newValue = func->interfaces_nums[idx];
+		break;
+
+	case FFS_STRING:
+		/* String IDs are allocated when ffs_data is bound to cdev */
+		newValue = func->ffs->stringtabs[0]->strings[*valuep - 1].id;
+		break;
+
+	case FFS_ENDPOINT:
+		/*
+		 * USB_DT_ENDPOINT are handled in
+		 * __ffs_func_bind_do_descs().
+		 */
+		if (desc->bDescriptorType == USB_DT_ENDPOINT)
+			return 0;
+
+		idx = (*valuep & USB_ENDPOINT_NUMBER_MASK) - 1;
+		if (unlikely(!func->eps[idx].ep))
+			return -EINVAL;
+
+		{
+			struct usb_endpoint_descriptor **descs;
+			descs = func->eps[idx].descs;
+			newValue = descs[descs[0] ? 0 : 1]->bEndpointAddress;
+		}
+		break;
+	}
+
+	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
+	*valuep = newValue;
+	return 0;
+}
+
+static int ffs_func_bind(struct usb_configuration *c,
+			 struct usb_function *f)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	const int full = !!func->ffs->fs_descs_count;
+	const int high = gadget_is_dualspeed(func->gadget) &&
+		func->ffs->hs_descs_count;
+
+	int ret;
+
+	/* Make it a single chunk, less management later on */
+	struct {
+		struct ffs_ep eps[ffs->eps_count];
+		struct usb_descriptor_header
+			*fs_descs[full ? ffs->fs_descs_count + 1 : 0];
+		struct usb_descriptor_header
+			*hs_descs[high ? ffs->hs_descs_count + 1 : 0];
+		short inums[ffs->interfaces_count];
+		char raw_descs[high ? ffs->raw_descs_length
+				    : ffs->raw_fs_descs_length];
+	} *data;
+
+	ENTER();
+
+	/* Only high speed but not supported by gadget? */
+	if (unlikely(!(full | high)))
+		return -ENOTSUPP;
+
+	/* Allocate */
+	data = kmalloc(sizeof *data, GFP_KERNEL);
+	if (unlikely(!data))
+		return -ENOMEM;
+
+	/* Zero */
+	memset(data->eps, 0, sizeof data->eps);
+	memcpy(data->raw_descs, ffs->raw_descs + 16, sizeof data->raw_descs);
+	memset(data->inums, 0xff, sizeof data->inums);
+	for (ret = ffs->eps_count; ret; --ret)
+		data->eps[ret - 1].num = -1;
+
+	/* Save pointers */
+	func->eps             = data->eps;
+	func->interfaces_nums = data->inums;
+
+	/*
+	 * Go through all the endpoint descriptors and allocate
+	 * endpoints first, so that later we can rewrite the endpoint
+	 * numbers without worrying that one may be described later on.
+	 */
+	if (likely(full)) {
+		func->function.descriptors = data->fs_descs;
+		ret = ffs_do_descs(ffs->fs_descs_count,
+				   data->raw_descs,
+				   sizeof data->raw_descs,
+				   __ffs_func_bind_do_descs, func);
+		if (unlikely(ret < 0))
+			goto error;
+	} else {
+		ret = 0;
+	}
+
+	if (likely(high)) {
+		func->function.hs_descriptors = data->hs_descs;
+		ret = ffs_do_descs(ffs->hs_descs_count,
+				   data->raw_descs + ret,
+				   (sizeof data->raw_descs) - ret,
+				   __ffs_func_bind_do_descs, func);
+	}
+
+	/*
+	 * Now handle interface numbers allocation and interface and
+	 * endpoint numbers rewriting.  We can do that in one go
+	 * now.
+	 */
+	ret = ffs_do_descs(ffs->fs_descs_count +
+			   (high ? ffs->hs_descs_count : 0),
+			   data->raw_descs, sizeof data->raw_descs,
+			   __ffs_func_bind_do_nums, func);
+	if (unlikely(ret < 0))
+		goto error;
+
+	/* And we're done */
+	ffs_event_add(ffs, FUNCTIONFS_BIND);
+	return 0;
+
+error:
+	/* XXX Do we need to release all claimed endpoints here? */
+	return ret;
+}
+
+
+/* Other USB function hooks *************************************************/
+
+static void ffs_func_unbind(struct usb_configuration *c,
+			    struct usb_function *f)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+
+	ENTER();
+
+	if (ffs->func == func) {
+		ffs_func_eps_disable(func);
+		ffs->func = NULL;
+	}
+
+	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+	ffs_func_free(func);
+}
+
+static int ffs_func_set_alt(struct usb_function *f,
+			    unsigned interface, unsigned alt)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+	int ret = 0, intf;
+
+	if (alt != (unsigned)-1) {
+		intf = ffs_func_revmap_intf(func, interface);
+		if (unlikely(intf < 0))
+			return intf;
+	}
+
+	if (ffs->func)
+		ffs_func_eps_disable(ffs->func);
+
+	if (ffs->state != FFS_ACTIVE)
+		return -ENODEV;
+
+	if (alt == (unsigned)-1) {
+		ffs->func = NULL;
+		ffs_event_add(ffs, FUNCTIONFS_DISABLE);
+		return 0;
+	}
+
+	ffs->func = func;
+	ret = ffs_func_eps_enable(func);
+	if (likely(ret >= 0))
+		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+	return ret;
+}
+
+static void ffs_func_disable(struct usb_function *f)
+{
+	ffs_func_set_alt(f, 0, (unsigned)-1);
+}
+
+static int ffs_func_setup(struct usb_function *f,
+			  const struct usb_ctrlrequest *creq)
+{
+	struct ffs_function *func = ffs_func_from_usb(f);
+	struct ffs_data *ffs = func->ffs;
+	unsigned long flags;
+	int ret;
+
+	ENTER();
+
+	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
+	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
+	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
+	pr_vdebug("creq->wIndex       = %04x\n", le16_to_cpu(creq->wIndex));
+	pr_vdebug("creq->wLength      = %04x\n", le16_to_cpu(creq->wLength));
+
+	/*
+	 * Most requests directed to an interface go through here
+	 * (notable exceptions are set/get interface) so we need to
+	 * handle them.  All others are either handled by composite or
+	 * passed to usb_configuration->setup() (if one is set).  We
+	 * will also handle requests directed to an endpoint here (as
+	 * it's straightforward), but what to do with any other
+	 * request?
+	 */
+	if (ffs->state != FFS_ACTIVE)
+		return -ENODEV;
+
+	switch (creq->bRequestType & USB_RECIP_MASK) {
+	case USB_RECIP_INTERFACE:
+		ret = ffs_func_revmap_intf(func, le16_to_cpu(creq->wIndex));
+		if (unlikely(ret < 0))
+			return ret;
+		break;
+
+	case USB_RECIP_ENDPOINT:
+		ret = ffs_func_revmap_ep(func, le16_to_cpu(creq->wIndex));
+		if (unlikely(ret < 0))
+			return ret;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	spin_lock_irqsave(&ffs->ev.waitq.lock, flags);
+	ffs->ev.setup = *creq;
+	ffs->ev.setup.wIndex = cpu_to_le16(ret);
+	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
+	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
+
+	return 0;
+}
+
+static void ffs_func_suspend(struct usb_function *f)
+{
+	ENTER();
+	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+}
+
+static void ffs_func_resume(struct usb_function *f)
+{
+	ENTER();
+	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+}
+
+
+/* Endpoint and interface numbers reverse mapping ***************************/
+
+static int ffs_func_revmap_ep(struct ffs_function *func, u8 num)
+{
+	num = func->eps_revmap[num & USB_ENDPOINT_NUMBER_MASK];
+	return num ? num : -EDOM;
+}
+
+static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf)
+{
+	short *nums = func->interfaces_nums;
+	unsigned count = func->ffs->interfaces_count;
+
+	for (; count; --count, ++nums) {
+		if (*nums >= 0 && *nums == intf)
+			return nums - func->interfaces_nums;
+	}
+
+	return -EDOM;
+}
+
+
+/* Misc helper functions ****************************************************/
+
+static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
+{
+	return nonblock
+		? likely(mutex_trylock(mutex)) ? 0 : -EAGAIN
+		: mutex_lock_interruptible(mutex);
+}
+
+static char *ffs_prepare_buffer(const char * __user buf, size_t len)
+{
+	char *data;
+
+	if (unlikely(!len))
+		return NULL;
+
+	data = kmalloc(len, GFP_KERNEL);
+	if (unlikely(!data))
+		return ERR_PTR(-ENOMEM);
+
+	if (unlikely(__copy_from_user(data, buf, len))) {
+		kfree(data);
+		return ERR_PTR(-EFAULT);
+	}
+
+	pr_vdebug("Buffer from user space:\n");
+	ffs_dump_mem("", data, len);
+
+	return data;
+}
diff --git a/drivers/staging/ccg/f_mass_storage.c b/drivers/staging/ccg/f_mass_storage.c
new file mode 100644
index 0000000..4f1142e
--- /dev/null
+++ b/drivers/staging/ccg/f_mass_storage.c
@@ -0,0 +1,3135 @@
+/*
+ * f_mass_storage.c -- Mass Storage USB Composite Function
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz <mina86 at mina86.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The Mass Storage Function acts as a USB Mass Storage device,
+ * appearing to the host as a disk drive or as a CD-ROM drive.  In
+ * addition to providing an example of a genuinely useful composite
+ * function for a USB device, it also illustrates a technique of
+ * double-buffering for increased throughput.
+ *
+ * For more information about MSF and in particular its module
+ * parameters and sysfs interface read the
+ * <Documentation/usb/mass-storage.txt> file.
+ */
+
+/*
+ * MSF is configured by specifying a fsg_config structure.  It has the
+ * following fields:
+ *
+ *	nluns		Number of LUNs the function has (anywhere from 1
+ *				to FSG_MAX_LUNS, which is 8).
+ *	luns		An array of LUN configuration values.  This
+ *				should be filled in for each LUN the
+ *				function will include (i.e. for "nluns"
+ *				LUNs).  Each element of the array has
+ *				the following fields:
+ *	->filename	The path to the backing file for the LUN.
+ *				Required if LUN is not marked as
+ *				removable.
+ *	->ro		Flag specifying that access to the LUN shall be
+ *				read-only.  This is implied if CD-ROM
+ *				emulation is enabled, and also when it
+ *				was impossible to open "filename" in
+ *				R/W mode.
+ *	->removable	Flag specifying that the LUN shall be indicated
+ *				as being removable.
+ *	->cdrom		Flag specifying that the LUN shall be reported
+ *				as being a CD-ROM.
+ *	->nofua		Flag specifying that the FUA flag in SCSI
+ *				WRITE(10,12) commands for this LUN shall
+ *				be ignored.
+ *
+ *	vendor_name
+ *	product_name
+ *	release		Information used as a reply to the INQUIRY
+ *				request.  To use the defaults, set these
+ *				to NULL, NULL and 0xffff respectively.
+ *				The first field should be 8 characters
+ *				or less and the second 16 or less.
+ *
+ *	can_stall	Set to permit the function to halt bulk
+ *				endpoints.  Disabled on some USB device
+ *				controllers known not to work correctly.
+ *				You should set it to true.
+ *
+ * If "removable" is not set for a LUN then a backing file must be
+ * specified.  If it is set, then a NULL filename means the LUN's medium
+ * is not loaded (an empty string as "filename" in the fsg_config
+ * structure causes an error).  The CD-ROM emulation includes a single
+ * data track and no audio tracks; hence there need be only one
+ * backing file per LUN.
+ *
+ * This function is heavily based on "File-backed Storage Gadget" by
+ * Alan Stern which in turn is heavily based on "Gadget Zero" by David
+ * Brownell.  The driver's SCSI command interface was based on the
+ * "Information technology - Small Computer System Interface - 2"
+ * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
+ * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
+ * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
+ * was based on the "Universal Serial Bus Mass Storage Class UFI
+ * Command Specification" document, Revision 1.0, December 14, 1998,
+ * available at
+ * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
+ */
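+
+/*
+ * Illustrative sketch (not part of the original driver): a gadget might
+ * fill the fsg_config structure defined further down in this file roughly
+ * like this before handing it to the common-structure setup code.  The
+ * backing file path and name strings below are hypothetical placeholders.
+ *
+ *	static struct fsg_config cfg = {
+ *		.nluns = 1,
+ *		.luns[0] = {
+ *			.filename  = "/data/backing.img",
+ *			.removable = 1,
+ *		},
+ *		.vendor_name  = "Example",
+ *		.product_name = "Mass Storage",
+ *		.release      = 0xffff,
+ *		.can_stall    = 1,
+ *	};
+ */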
+
+/*
+ *				Driver Design
+ *
+ * The MSF is fairly straightforward.  There is a main kernel
+ * thread that handles most of the work.  Interrupt routines field
+ * callbacks from the controller driver: bulk- and interrupt-request
+ * completion notifications, endpoint-0 events, and disconnect events.
+ * Completion events are passed to the main thread by wakeup calls.  Many
+ * ep0 requests are handled at interrupt time, but SetInterface,
+ * SetConfiguration, and device reset requests are forwarded to the
+ * thread in the form of "exceptions" using SIGUSR1 signals (since they
+ * should interrupt any ongoing file I/O operations).
+ *
+ * The thread's main routine implements the standard command/data/status
+ * parts of a SCSI interaction.  It and its subroutines are full of tests
+ * for pending signals/exceptions -- all this polling is necessary since
+ * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
+ * indication that the driver really wants to be running in userspace.)
+ * An important point is that so long as the thread is alive it keeps an
+ * open reference to the backing file.  This will prevent unmounting
+ * the backing file's underlying filesystem and could cause problems
+ * during system shutdown, for example.  To prevent such problems, the
+ * thread catches INT, TERM, and KILL signals and converts them into
+ * an EXIT exception.
+ *
+ * In normal operation the main thread is started during the gadget's
+ * fsg_bind() callback and stopped during fsg_unbind().  But it can
+ * also exit when it receives a signal, and there's no point leaving
+ * the gadget running when the thread is dead.  As of this moment, MSF
+ * provides no way to deregister the gadget when the thread dies -- maybe
+ * a callback function is needed.
+ *
+ * To provide maximum throughput, the driver uses a circular pipeline of
+ * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
+ * arbitrarily long; in practice the benefits don't justify having more
+ * than 2 stages (i.e., double buffering).  But it helps to think of the
+ * pipeline as being a long one.  Each buffer head contains a bulk-in and
+ * a bulk-out request pointer (since the buffer can be used for both
+ * output and input -- directions always are given from the host's
+ * point of view) as well as a pointer to the buffer and various state
+ * variables.
+ *
+ * Use of the pipeline follows a simple protocol.  There is a variable
+ * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
+ * At any time that buffer head may still be in use from an earlier
+ * request, so each buffer head has a state variable indicating whether
+ * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
+ * buffer head to be EMPTY, filling the buffer either by file I/O or by
+ * USB I/O (during which the buffer head is BUSY), and marking the buffer
+ * head FULL when the I/O is complete.  Then the buffer will be emptied
+ * (again possibly by USB I/O, during which it is marked BUSY) and
+ * finally marked EMPTY again (possibly by a completion routine).
+ *
+ * A module parameter tells the driver to avoid stalling the bulk
+ * endpoints wherever the transport specification allows.  This is
+ * necessary for some UDCs like the SuperH, which cannot reliably clear a
+ * halt on a bulk endpoint.  However, under certain circumstances the
+ * Bulk-only specification requires a stall.  In such cases the driver
+ * will halt the endpoint and set a flag indicating that it should clear
+ * the halt in software during the next device reset.  Hopefully this
+ * will permit everything to work correctly.  Furthermore, although the
+ * specification allows the bulk-out endpoint to halt when the host sends
+ * too much data, implementing this would cause an unavoidable race.
+ * The driver will always use the "no-stall" approach for OUT transfers.
+ *
+ * One subtle point concerns sending status-stage responses for ep0
+ * requests.  Some of these requests, such as device reset, can involve
+ * interrupting an ongoing file I/O operation, which might take an
+ * arbitrarily long time.  During that delay the host might give up on
+ * the original ep0 request and issue a new one.  When that happens the
+ * driver should not notify the host about completion of the original
+ * request, as the host will no longer be waiting for it.  So the driver
+ * assigns to each ep0 request a unique tag, and it keeps track of the
+ * tag value of the request associated with a long-running exception
+ * (device-reset, interface-change, or configuration-change).  When the
+ * exception handler is finished, the status-stage response is submitted
+ * only if the current ep0 request tag is equal to the exception request
+ * tag.  Thus only the most recently received ep0 request will get a
+ * status-stage response.
+ *
+ * Warning: This driver source file is too long.  It ought to be split up
+ * into a header file plus about 3 separate .c files, to handle the details
+ * of the Gadget, USB Mass Storage, and SCSI protocols.
+ */
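+
+/*
+ * Illustrative sketch (not part of the original driver): for a host read,
+ * the buffer-head cycle described above looks roughly like
+ *
+ *	EMPTY --(thread fills buffer via vfs_read)--------> FULL
+ *	FULL  --(thread queues the bulk-in request)-------> BUSY
+ *	BUSY  --(bulk_in_complete marks it reusable)------> EMPTY
+ *
+ * For a host write the roles are swapped: the bulk-out completion marks
+ * the buffer FULL, and the main thread drains it with vfs_write so it
+ * becomes EMPTY again.
+ */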
+
+
+/* #define VERBOSE_DEBUG */
+/* #define DUMP_MSGS */
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/limits.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <linux/utsname.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
+
+#include "gadget_chips.h"
+
+
+/*------------------------------------------------------------------------*/
+
+#define FSG_DRIVER_DESC		"Mass Storage Function"
+#define FSG_DRIVER_VERSION	"2009/09/11"
+
+static const char fsg_string_interface[] = "Mass Storage";
+
+#define FSG_NO_DEVICE_STRINGS    1
+#define FSG_NO_OTG               1
+#define FSG_NO_INTR_EP           1
+
+#include "storage_common.c"
+
+
+/*-------------------------------------------------------------------------*/
+
+struct fsg_dev;
+struct fsg_common;
+
+/* FSG callback functions */
+struct fsg_operations {
+	/*
+	 * Callback function to call when the thread exits.  If no
+	 * callback is set, or if it returns a value lower than zero,
+	 * MSF will force-eject all LUNs it operates on (including
+	 * those marked as non-removable or with the
+	 * prevent_medium_removal flag set).
+	 */
+	int (*thread_exits)(struct fsg_common *common);
+
+	/*
+	 * Called prior to ejection.  Negative return means error,
+	 * zero means to continue with ejection, positive means not to
+	 * eject.
+	 */
+	int (*pre_eject)(struct fsg_common *common,
+			 struct fsg_lun *lun, int num);
+	/*
+	 * Called after ejection.  Negative return means error, zero
+	 * or positive means success.
+	 */
+	int (*post_eject)(struct fsg_common *common,
+			  struct fsg_lun *lun, int num);
+};
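+
+/*
+ * Illustrative sketch (not part of the original driver): a gadget that only
+ * wants to be told when the worker thread exits could provide something
+ * like the following; the names are hypothetical.  Returning a
+ * non-negative value here tells MSF not to force-eject the LUNs.
+ *
+ *	static int example_thread_exits(struct fsg_common *common)
+ *	{
+ *		pr_info("mass storage worker thread exited\n");
+ *		return 0;
+ *	}
+ *
+ *	static const struct fsg_operations example_ops = {
+ *		.thread_exits = example_thread_exits,
+ *	};
+ */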
+
+/* Data shared by all the FSG instances. */
+struct fsg_common {
+	struct usb_gadget	*gadget;
+	struct usb_composite_dev *cdev;
+	struct fsg_dev		*fsg, *new_fsg;
+	wait_queue_head_t	fsg_wait;
+
+	/* filesem protects: backing files in use */
+	struct rw_semaphore	filesem;
+
+	/* lock protects: state, all the req_busy's */
+	spinlock_t		lock;
+
+	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
+	struct usb_request	*ep0req;	/* Copy of cdev->req */
+	unsigned int		ep0_req_tag;
+
+	struct fsg_buffhd	*next_buffhd_to_fill;
+	struct fsg_buffhd	*next_buffhd_to_drain;
+	struct fsg_buffhd	*buffhds;
+
+	int			cmnd_size;
+	u8			cmnd[MAX_COMMAND_SIZE];
+
+	unsigned int		nluns;
+	unsigned int		lun;
+	struct fsg_lun		*luns;
+	struct fsg_lun		*curlun;
+
+	unsigned int		bulk_out_maxpacket;
+	enum fsg_state		state;		/* For exception handling */
+	unsigned int		exception_req_tag;
+
+	enum data_direction	data_dir;
+	u32			data_size;
+	u32			data_size_from_cmnd;
+	u32			tag;
+	u32			residue;
+	u32			usb_amount_left;
+
+	unsigned int		can_stall:1;
+	unsigned int		free_storage_on_release:1;
+	unsigned int		phase_error:1;
+	unsigned int		short_packet_received:1;
+	unsigned int		bad_lun_okay:1;
+	unsigned int		running:1;
+
+	int			thread_wakeup_needed;
+	struct completion	thread_notifier;
+	struct task_struct	*thread_task;
+
+	/* Callback functions. */
+	const struct fsg_operations	*ops;
+	/* Gadget's private data. */
+	void			*private_data;
+
+	/*
+	 * Vendor (8 chars), product (16 chars), release (4
+	 * hexadecimal digits) and NUL byte
+	 */
+	char inquiry_string[8 + 16 + 4 + 1];
+
+	struct kref		ref;
+};
+
+struct fsg_config {
+	unsigned nluns;
+	struct fsg_lun_config {
+		const char *filename;
+		char ro;
+		char removable;
+		char cdrom;
+		char nofua;
+	} luns[FSG_MAX_LUNS];
+
+	/* Callback functions. */
+	const struct fsg_operations	*ops;
+	/* Gadget's private data. */
+	void			*private_data;
+
+	const char *vendor_name;		/*  8 characters or less */
+	const char *product_name;		/* 16 characters or less */
+	u16 release;
+
+	char			can_stall;
+};
+
+struct fsg_dev {
+	struct usb_function	function;
+	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
+	struct fsg_common	*common;
+
+	u16			interface_number;
+
+	unsigned int		bulk_in_enabled:1;
+	unsigned int		bulk_out_enabled:1;
+
+	unsigned long		atomic_bitflags;
+#define IGNORE_BULK_OUT		0
+
+	struct usb_ep		*bulk_in;
+	struct usb_ep		*bulk_out;
+};
+
+static inline int __fsg_is_set(struct fsg_common *common,
+			       const char *func, unsigned line)
+{
+	if (common->fsg)
+		return 1;
+	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
+	WARN_ON(1);
+	return 0;
+}
+
+#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
+
+static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
+{
+	return container_of(f, struct fsg_dev, function);
+}
+
+typedef void (*fsg_routine_t)(struct fsg_dev *);
+
+static int exception_in_progress(struct fsg_common *common)
+{
+	return common->state > FSG_STATE_IDLE;
+}
+
+/* Make bulk-out requests be divisible by the maxpacket size */
+static void set_bulk_out_req_length(struct fsg_common *common,
+				    struct fsg_buffhd *bh, unsigned int length)
+{
+	unsigned int	rem;
+
+	bh->bulk_out_intended_length = length;
+	rem = length % common->bulk_out_maxpacket;
+	if (rem > 0)
+		length += common->bulk_out_maxpacket - rem;
+	bh->outreq->length = length;
+}
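+
+/*
+ * Worked example (illustrative): with bulk_out_maxpacket == 512, a
+ * requested length of 600 is padded up to 1024 so the bulk-out request
+ * spans a whole number of max-size packets, while
+ * bh->bulk_out_intended_length keeps the original 600 for later
+ * comparison against req->actual.
+ */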
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
+{
+	const char	*name;
+
+	if (ep == fsg->bulk_in)
+		name = "bulk-in";
+	else if (ep == fsg->bulk_out)
+		name = "bulk-out";
+	else
+		name = ep->name;
+	DBG(fsg, "%s set halt\n", name);
+	return usb_ep_set_halt(ep);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* These routines may be called in process context or in_irq */
+
+/* Caller must hold fsg->lock */
+static void wakeup_thread(struct fsg_common *common)
+{
+	/* Tell the main thread that something has happened */
+	common->thread_wakeup_needed = 1;
+	if (common->thread_task)
+		wake_up_process(common->thread_task);
+}
+
+static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
+{
+	unsigned long		flags;
+
+	/*
+	 * Do nothing if a higher-priority exception is already in progress.
+	 * If a lower-or-equal priority exception is in progress, preempt it
+	 * and notify the main thread by sending it a signal.
+	 */
+	spin_lock_irqsave(&common->lock, flags);
+	if (common->state <= new_state) {
+		common->exception_req_tag = common->ep0_req_tag;
+		common->state = new_state;
+		if (common->thread_task)
+			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
+				      common->thread_task);
+	}
+	spin_unlock_irqrestore(&common->lock, flags);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int ep0_queue(struct fsg_common *common)
+{
+	int	rc;
+
+	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
+	common->ep0->driver_data = common;
+	if (rc != 0 && rc != -ESHUTDOWN) {
+		/* We can't do much more than wait for a reset */
+		WARNING(common, "error in submission: %s --> %d\n",
+			common->ep0->name, rc);
+	}
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Completion handlers. These always run in_irq. */
+
+static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	if (req->status || req->actual != req->length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		    req->status, req->actual, req->length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->inreq_busy = 0;
+	bh->state = BUF_STATE_EMPTY;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct fsg_common	*common = ep->driver_data;
+	struct fsg_buffhd	*bh = req->context;
+
+	dump_msg(common, "bulk-out", req->buf, req->actual);
+	if (req->status || req->actual != bh->bulk_out_intended_length)
+		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		    req->status, req->actual, bh->bulk_out_intended_length);
+	if (req->status == -ECONNRESET)		/* Request was cancelled */
+		usb_ep_fifo_flush(ep);
+
+	/* Hold the lock while we update the request and buffer states */
+	smp_wmb();
+	spin_lock(&common->lock);
+	bh->outreq_busy = 0;
+	bh->state = BUF_STATE_FULL;
+	wakeup_thread(common);
+	spin_unlock(&common->lock);
+}
+
+static int fsg_setup(struct usb_function *f,
+		     const struct usb_ctrlrequest *ctrl)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_request	*req = fsg->common->ep0req;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	if (!fsg_is_set(fsg->common))
+		return -EOPNOTSUPP;
+
+	++fsg->common->ep0_req_tag;	/* Record arrival of a new request */
+	req->context = NULL;
+	req->length = 0;
+	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
+
+	switch (ctrl->bRequest) {
+
+	case US_BULK_RESET_REQUEST:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0 ||
+				w_length != 0)
+			return -EDOM;
+
+		/*
+		 * Raise an exception to stop the current operation
+		 * and reinitialize our state.
+		 */
+		DBG(fsg, "bulk reset request\n");
+		raise_exception(fsg->common, FSG_STATE_RESET);
+		return DELAYED_STATUS;
+
+	case US_BULK_GET_MAX_LUN:
+		if (ctrl->bRequestType !=
+		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
+			break;
+		if (w_index != fsg->interface_number || w_value != 0 ||
+				w_length != 1)
+			return -EDOM;
+		VDBG(fsg, "get max LUN\n");
+		*(u8 *)req->buf = fsg->common->nluns - 1;
+
+		/* Respond with data/status */
+		req->length = min((u16)1, w_length);
+		return ep0_queue(fsg->common);
+	}
+
+	VDBG(fsg,
+	     "unknown class-specific control req %02x.%02x v%04x i%04x l%u\n",
+	     ctrl->bRequestType, ctrl->bRequest,
+	     le16_to_cpu(ctrl->wValue), w_index, w_length);
+	return -EOPNOTSUPP;
+}
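+
+/*
+ * For reference (Bulk-Only Transport spec values, not part of the original
+ * driver): the two class-specific requests handled above arrive as
+ *
+ *	Bulk-Only Mass Storage Reset: bmRequestType 0x21, bRequest 0xFF,
+ *		wValue 0, wIndex = interface number, wLength 0
+ *	Get Max LUN: bmRequestType 0xA1, bRequest 0xFE,
+ *		wValue 0, wIndex = interface number, wLength 1
+ *
+ * and the Get Max LUN response is a single byte holding nluns - 1.
+ */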
+
+
+/*-------------------------------------------------------------------------*/
+
+/* All the following routines run in process context */
+
+/* Use this for bulk or interrupt transfers, not ep0 */
+static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+			   struct usb_request *req, int *pbusy,
+			   enum fsg_buffer_state *state)
+{
+	int	rc;
+
+	if (ep == fsg->bulk_in)
+		dump_msg(fsg, "bulk-in", req->buf, req->length);
+
+	spin_lock_irq(&fsg->common->lock);
+	*pbusy = 1;
+	*state = BUF_STATE_BUSY;
+	spin_unlock_irq(&fsg->common->lock);
+	rc = usb_ep_queue(ep, req, GFP_KERNEL);
+	if (rc != 0) {
+		*pbusy = 0;
+		*state = BUF_STATE_EMPTY;
+
+		/* We can't do much more than wait for a reset */
+
+		/*
+		 * Note: currently the net2280 driver fails zero-length
+		 * submissions if DMA is enabled.
+		 */
+		if (rc != -ESHUTDOWN &&
+		    !(rc == -EOPNOTSUPP && req->length == 0))
+			WARNING(fsg, "error in submission: %s --> %d\n",
+				ep->name, rc);
+	}
+}
+
+static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_in,
+		       bh->inreq, &bh->inreq_busy, &bh->state);
+	return true;
+}
+
+static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	if (!fsg_is_set(common))
+		return false;
+	start_transfer(common->fsg, common->fsg->bulk_out,
+		       bh->outreq, &bh->outreq_busy, &bh->state);
+	return true;
+}
+
+static int sleep_thread(struct fsg_common *common)
+{
+	int	rc = 0;
+
+	/* Wait until a signal arrives or we are woken up */
+	for (;;) {
+		try_to_freeze();
+		set_current_state(TASK_INTERRUPTIBLE);
+		if (signal_pending(current)) {
+			rc = -EINTR;
+			break;
+		}
+		if (common->thread_wakeup_needed)
+			break;
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+	common->thread_wakeup_needed = 0;
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_read(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			rc;
+	u32			amount_left;
+	loff_t			file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
+	if (common->cmnd[0] == READ_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = don't read from the
+		 * cache), but we don't implement them.
+		 */
+		if ((common->cmnd[1] & ~0x18) != 0) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Carry out the file reads */
+	amount_left = common->data_size_from_cmnd;
+	if (unlikely(amount_left == 0))
+		return -EIO;		/* No default reply */
+
+	for (;;) {
+		/*
+		 * Figure out how much we need to read:
+		 * Try to read the remaining amount.
+		 * But don't read more than the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
+
+		/* Wait for the next buffer to become available */
+		bh = common->next_buffhd_to_fill;
+		while (bh->state != BUF_STATE_EMPTY) {
+			rc = sleep_thread(common);
+			if (rc)
+				return rc;
+		}
+
+		/*
+		 * If we were asked to read past the end of file,
+		 * end with an empty buffer.
+		 */
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			bh->inreq->length = 0;
+			bh->state = BUF_STATE_FULL;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				 (char __user *)bh->buf,
+				 amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+		      (unsigned long long)file_offset, (int)nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file read: %d\n", (int)nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file read: %d/%u\n",
+			     (int)nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		file_offset  += nread;
+		amount_left  -= nread;
+		common->residue -= nread;
+
+		/*
+		 * Except at the end of the transfer, nread will be
+		 * equal to the buffer size, which is divisible by the
+		 * bulk-in maxpacket size.
+		 */
+		bh->inreq->length = nread;
+		bh->state = BUF_STATE_FULL;
+
+		/* If an error occurred, report it and its position */
+		if (nread < amount) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		if (amount_left == 0)
+			break;		/* No more left to read */
+
+		/* Send this buffer and go read some more */
+		bh->inreq->zero = 0;
+		if (!start_in_transfer(common, bh))
+			/* Don't know what to do if common->fsg is NULL */
+			return -EIO;
+		common->next_buffhd_to_fill = bh->next;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_write(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	struct fsg_buffhd	*bh;
+	int			get_some_more;
+	u32			amount_left_to_req, amount_left_to_write;
+	loff_t			usb_offset, file_offset, file_offset_tmp;
+	unsigned int		amount;
+	ssize_t			nwritten;
+	int			rc;
+
+	if (curlun->ro) {
+		curlun->sense_data = SS_WRITE_PROTECTED;
+		return -EINVAL;
+	}
+	spin_lock(&curlun->filp->f_lock);
+	curlun->filp->f_flags &= ~O_SYNC;	/* Default is not to wait */
+	spin_unlock(&curlun->filp->f_lock);
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big
+	 */
+	if (common->cmnd[0] == WRITE_6)
+		lba = get_unaligned_be24(&common->cmnd[1]);
+	else {
+		lba = get_unaligned_be32(&common->cmnd[2]);
+
+		/*
+		 * We allow DPO (Disable Page Out = don't save data in the
+		 * cache) and FUA (Force Unit Access = write directly to the
+		 * medium).  We don't implement DPO; we implement FUA by
+		 * performing synchronous output.
+		 */
+		if (common->cmnd[1] & ~0x18) {
+			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+		if (!curlun->nofua && (common->cmnd[1] & 0x08)) { /* FUA */
+			spin_lock(&curlun->filp->f_lock);
+			curlun->filp->f_flags |= O_SYNC;
+			spin_unlock(&curlun->filp->f_lock);
+		}
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/* Carry out the file writes */
+	get_some_more = 1;
+	file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
+	amount_left_to_req = common->data_size_from_cmnd;
+	amount_left_to_write = common->data_size_from_cmnd;
+
+	while (amount_left_to_write > 0) {
+
+		/* Queue a request for more data from the host */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
+
+			/*
+			 * Figure out how much we want to get:
+			 * Try to get the remaining amount,
+			 * but not more than the buffer size.
+			 */
+			amount = min(amount_left_to_req, FSG_BUFLEN);
+
+			/* Beyond the end of the backing file? */
+			if (usb_offset >= curlun->file_length) {
+				get_some_more = 0;
+				curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+				curlun->sense_data_info =
+					usb_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				continue;
+			}
+
+			/* Get the next buffer */
+			usb_offset += amount;
+			common->usb_amount_left -= amount;
+			amount_left_to_req -= amount;
+			if (amount_left_to_req == 0)
+				get_some_more = 0;
+
+			/*
+			 * Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(common, bh, amount);
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			continue;
+		}
+
+		/* Write the received data to the backing file */
+		bh = common->next_buffhd_to_drain;
+		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
+			break;			/* We stopped early */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			common->next_buffhd_to_drain = bh->next;
+			bh->state = BUF_STATE_EMPTY;
+
+			/* Did something go wrong with the transfer? */
+			if (bh->outreq->status != 0) {
+				curlun->sense_data = SS_COMMUNICATION_FAILURE;
+				curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+			amount = bh->outreq->actual;
+			if (curlun->file_length - file_offset < amount) {
+				LERROR(curlun,
+				       "write %u @ %llu beyond end %llu\n",
+				       amount, (unsigned long long)file_offset,
+				       (unsigned long long)curlun->file_length);
+				amount = curlun->file_length - file_offset;
+			}
+
+			/* Don't accept excess data.  The spec doesn't say
+			 * what to do in this case.  We'll ignore the error.
+			 */
+			amount = min(amount, bh->bulk_out_intended_length);
+
+			/* Don't write a partial block */
+			amount = round_down(amount, curlun->blksize);
+			if (amount == 0)
+				goto empty_write;
+
+			/* Perform the write */
+			file_offset_tmp = file_offset;
+			nwritten = vfs_write(curlun->filp,
+					     (char __user *)bh->buf,
+					     amount, &file_offset_tmp);
+			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
+			      (unsigned long long)file_offset, (int)nwritten);
+			if (signal_pending(current))
+				return -EINTR;		/* Interrupted! */
+
+			if (nwritten < 0) {
+				LDBG(curlun, "error in file write: %d\n",
+				     (int)nwritten);
+				nwritten = 0;
+			} else if (nwritten < amount) {
+				LDBG(curlun, "partial file write: %d/%u\n",
+				     (int)nwritten, amount);
+				nwritten = round_down(nwritten, curlun->blksize);
+			}
+			file_offset += nwritten;
+			amount_left_to_write -= nwritten;
+			common->residue -= nwritten;
+
+			/* If an error occurred, report it and its position */
+			if (nwritten < amount) {
+				curlun->sense_data = SS_WRITE_ERROR;
+				curlun->sense_data_info =
+					file_offset >> curlun->blkbits;
+				curlun->info_valid = 1;
+				break;
+			}
+
+ empty_write:
+			/* Did the host decide to stop early? */
+			if (bh->outreq->actual < bh->bulk_out_intended_length) {
+				common->short_packet_received = 1;
+				break;
+			}
+			continue;
+		}
+
+		/* Wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	return -EIO;		/* No default reply */
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_synchronize_cache(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		rc;
+
+	/* We ignore the requested LBA and write out all of the
+	 * file's dirty data buffers. */
+	rc = fsg_lun_fsync_sub(curlun);
+	if (rc)
+		curlun->sense_data = SS_WRITE_ERROR;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void invalidate_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+	struct inode	*inode = filp->f_path.dentry->d_inode;
+	unsigned long	rc;
+
+	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
+	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
+}
+
+static int do_verify(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	u32			lba;
+	u32			verification_length;
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	loff_t			file_offset, file_offset_tmp;
+	u32			amount_left;
+	unsigned int		amount;
+	ssize_t			nread;
+
+	/*
+	 * Get the starting Logical Block Address and check that it's
+	 * not too big.
+	 */
+	lba = get_unaligned_be32(&common->cmnd[2]);
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	/*
+	 * We allow DPO (Disable Page Out = don't save data in the
+	 * cache) but we don't implement it.
+	 */
+	if (common->cmnd[1] & ~0x10) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	verification_length = get_unaligned_be16(&common->cmnd[7]);
+	if (unlikely(verification_length == 0))
+		return -EIO;		/* No default reply */
+
+	/* Prepare to carry out the file verify */
+	amount_left = verification_length << curlun->blkbits;
+	file_offset = ((loff_t) lba) << curlun->blkbits;
+
+	/* Write out all the dirty buffers before invalidating them */
+	fsg_lun_fsync_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	invalidate_sub(curlun);
+	if (signal_pending(current))
+		return -EINTR;
+
+	/* Just try to read the requested blocks */
+	while (amount_left > 0) {
+		/*
+		 * Figure out how much we need to read:
+		 * Try to read the remaining amount, but not more than
+		 * the buffer size.
+		 * And don't try to read past the end of the file.
+		 */
+		amount = min(amount_left, FSG_BUFLEN);
+		amount = min((loff_t)amount,
+			     curlun->file_length - file_offset);
+		if (amount == 0) {
+			curlun->sense_data =
+					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+			curlun->sense_data_info =
+				file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+
+		/* Perform the read */
+		file_offset_tmp = file_offset;
+		nread = vfs_read(curlun->filp,
+				(char __user *) bh->buf,
+				amount, &file_offset_tmp);
+		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
+				(unsigned long long) file_offset,
+				(int) nread);
+		if (signal_pending(current))
+			return -EINTR;
+
+		if (nread < 0) {
+			LDBG(curlun, "error in file verify: %d\n", (int)nread);
+			nread = 0;
+		} else if (nread < amount) {
+			LDBG(curlun, "partial file verify: %d/%u\n",
+			     (int)nread, amount);
+			nread = round_down(nread, curlun->blksize);
+		}
+		if (nread == 0) {
+			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
+			curlun->sense_data_info =
+				file_offset >> curlun->blkbits;
+			curlun->info_valid = 1;
+			break;
+		}
+		file_offset += nread;
+		amount_left -= nread;
+	}
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun *curlun = common->curlun;
+	u8	*buf = (u8 *) bh->buf;
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		memset(buf, 0, 36);
+		buf[0] = 0x7f;		/* Unsupported, no device-type */
+		buf[4] = 31;		/* Additional length */
+		return 36;
+	}
+
+	buf[0] = curlun->cdrom ? TYPE_ROM : TYPE_DISK;
+	buf[1] = curlun->removable ? 0x80 : 0;
+	buf[2] = 2;		/* ANSI SCSI level 2 */
+	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
+	buf[4] = 31;		/* Additional length */
+	buf[5] = 0;		/* No special options */
+	buf[6] = 0;
+	buf[7] = 0;
+	memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
+	return 36;
+}
+
+static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+	u32		sd, sdinfo;
+	int		valid;
+
+	/*
+	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
+	 *
+	 * If a REQUEST SENSE command is received from an initiator
+	 * with a pending unit attention condition (before the target
+	 * generates the contingent allegiance condition), then the
+	 * target shall either:
+	 *   a) report any pending sense data and preserve the unit
+	 *	attention condition on the logical unit, or,
+	 *   b) report the unit attention condition, may discard any
+	 *	pending sense data, and clear the unit attention
+	 *	condition on the logical unit for that initiator.
+	 *
+	 * FSG normally uses option a); enable this code to use option b).
+	 */
+#if 0
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+	}
+#endif
+
+	if (!curlun) {		/* Unsupported LUNs are okay */
+		common->bad_lun_okay = 1;
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+		sdinfo = 0;
+		valid = 0;
+	} else {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+		valid = curlun->info_valid << 7;
+		curlun->sense_data = SS_NO_SENSE;
+		curlun->sense_data_info = 0;
+		curlun->info_valid = 0;
+	}
+
+	memset(buf, 0, 18);
+	buf[0] = valid | 0x70;			/* Valid, current error */
+	buf[2] = SK(sd);
+	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
+	buf[7] = 18 - 8;			/* Additional sense length */
+	buf[12] = ASC(sd);
+	buf[13] = ASCQ(sd);
+	return 18;
+}
+
+static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	int		pmi = common->cmnd[8];
+	u8		*buf = (u8 *)bh->buf;
+
+	/* Check the PMI and LBA fields */
+	if (pmi > 1 || (pmi == 0 && lba != 0)) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
+						/* Max logical block */
+	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
+	return 8;
+}
+
+static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	u32		lba = get_unaligned_be32(&common->cmnd[2]);
+	u8		*buf = (u8 *)bh->buf;
+
+	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	if (lba >= curlun->num_sectors) {
+		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 8);
+	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
+	store_cdrom_address(&buf[4], msf, lba);
+	return 8;
+}
+
+static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		msf = common->cmnd[1] & 0x02;
+	int		start_track = common->cmnd[6];
+	u8		*buf = (u8 *)bh->buf;
+
+	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
+			start_track > 1) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 20);
+	buf[1] = (20-2);		/* TOC data length */
+	buf[2] = 1;			/* First track number */
+	buf[3] = 1;			/* Last track number */
+	buf[5] = 0x16;			/* Data track, copying allowed */
+	buf[6] = 0x01;			/* Only track is number 1 */
+	store_cdrom_address(&buf[8], msf, 0);
+
+	buf[13] = 0x16;			/* Lead-out track is data */
+	buf[14] = 0xAA;			/* Lead-out track number */
+	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
+	return 20;
+}
+
+static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		mscmnd = common->cmnd[0];
+	u8		*buf = (u8 *) bh->buf;
+	u8		*buf0 = buf;
+	int		pc, page_code;
+	int		changeable_values, all_pages;
+	int		valid_page = 0;
+	int		len, limit;
+
+	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+	pc = common->cmnd[2] >> 6;
+	page_code = common->cmnd[2] & 0x3f;
+	if (pc == 3) {
+		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+	changeable_values = (pc == 1);
+	all_pages = (page_code == 0x3f);
+
+	/*
+	 * Write the mode parameter header.  Fixed values are: default
+	 * medium type, no cache control (DPOFUA), and no block descriptors.
+	 * The only variable value is the WriteProtect bit.  We will fill in
+	 * the mode data length later.
+	 */
+	memset(buf, 0, 8);
+	if (mscmnd == MODE_SENSE) {
+		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 4;
+		limit = 255;
+	} else {			/* MODE_SENSE_10 */
+		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
+		buf += 8;
+		limit = 65535;		/* Should really be FSG_BUFLEN */
+	}
+
+	/* No block descriptors */
+
+	/*
+	 * The mode pages, in numerical order.  The only page we support
+	 * is the Caching page.
+	 */
+	if (page_code == 0x08 || all_pages) {
+		valid_page = 1;
+		buf[0] = 0x08;		/* Page code */
+		buf[1] = 10;		/* Page length */
+		memset(buf+2, 0, 10);	/* None of the fields are changeable */
+
+		if (!changeable_values) {
+			buf[2] = 0x04;	/* Write cache enable, */
+					/* Read cache not disabled */
+					/* No cache retention priorities */
+			put_unaligned_be16(0xffff, &buf[4]);
+					/* Don't disable prefetch */
+					/* Minimum prefetch = 0 */
+			put_unaligned_be16(0xffff, &buf[8]);
+					/* Maximum prefetch */
+			put_unaligned_be16(0xffff, &buf[10]);
+					/* Maximum prefetch ceiling */
+		}
+		buf += 12;
+	}
+
+	/*
+	 * Check that a valid page was requested and the mode data length
+	 * isn't too long.
+	 */
+	len = buf - buf0;
+	if (!valid_page || len > limit) {
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	/*  Store the mode data length */
+	if (mscmnd == MODE_SENSE)
+		buf0[0] = len - 1;
+	else
+		put_unaligned_be16(len - 2, buf0);
+	return len;
+}
+
+static int do_start_stop(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		loej, start;
+
+	if (!curlun) {
+		return -EINVAL;
+	} else if (!curlun->removable) {
+		curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	} else if ((common->cmnd[1] & ~0x01) != 0 || /* Mask away Immed */
+		   (common->cmnd[4] & ~0x03) != 0) { /* Mask LoEj, Start */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	loej  = common->cmnd[4] & 0x02;
+	start = common->cmnd[4] & 0x01;
+
+	/*
+	 * Our emulation doesn't support mounting; the medium is
+	 * available for use as soon as it is loaded.
+	 */
+	if (start) {
+		if (!fsg_lun_is_open(curlun)) {
+			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+			return -EINVAL;
+		}
+		return 0;
+	}
+
+	/* Are we allowed to unload the media? */
+	if (curlun->prevent_medium_removal) {
+		LDBG(curlun, "unload attempt prevented\n");
+		curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
+		return -EINVAL;
+	}
+
+	if (!loej)
+		return 0;
+
+	/* Simulate an unload/eject */
+	if (common->ops && common->ops->pre_eject) {
+		int r = common->ops->pre_eject(common, curlun,
+					       curlun - common->luns);
+		if (unlikely(r < 0))
+			return r;
+		else if (r)
+			return 0;
+	}
+
+	up_read(&common->filesem);
+	down_write(&common->filesem);
+	fsg_lun_close(curlun);
+	up_write(&common->filesem);
+	down_read(&common->filesem);
+
+	return common->ops && common->ops->post_eject
+		? min(0, common->ops->post_eject(common, curlun,
+						 curlun - common->luns))
+		: 0;
+}
+
+static int do_prevent_allow(struct fsg_common *common)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	int		prevent;
+
+	if (!common->curlun) {
+		return -EINVAL;
+	} else if (!common->curlun->removable) {
+		common->curlun->sense_data = SS_INVALID_COMMAND;
+		return -EINVAL;
+	}
+
+	prevent = common->cmnd[4] & 0x01;
+	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
+		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+		return -EINVAL;
+	}
+
+	if (curlun->prevent_medium_removal && !prevent)
+		fsg_lun_fsync_sub(curlun);
+	curlun->prevent_medium_removal = prevent;
+	return 0;
+}
+
+static int do_read_format_capacities(struct fsg_common *common,
+			struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+	u8		*buf = (u8 *) bh->buf;
+
+	buf[0] = buf[1] = buf[2] = 0;
+	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
+	buf += 4;
+
+	put_unaligned_be32(curlun->num_sectors, &buf[0]);
+						/* Number of blocks */
+	put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
+	buf[4] = 0x02;				/* Current capacity */
+	return 12;
+}
+
+static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+	struct fsg_lun	*curlun = common->curlun;
+
+	/* We don't support MODE SELECT */
+	if (curlun)
+		curlun->sense_data = SS_INVALID_COMMAND;
+	return -EINVAL;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	rc = fsg_set_halt(fsg, fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint halt\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_halt(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
+{
+	int	rc;
+
+	DBG(fsg, "bulk-in set wedge\n");
+	rc = usb_ep_set_wedge(fsg->bulk_in);
+	if (rc == -EAGAIN)
+		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
+	while (rc != 0) {
+		if (rc != -EAGAIN) {
+			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
+			rc = 0;
+			break;
+		}
+
+		/* Wait for a short time and then try again */
+		if (msleep_interruptible(100) != 0)
+			return -EINTR;
+		rc = usb_ep_set_wedge(fsg->bulk_in);
+	}
+	return rc;
+}
+
+static int throw_away_data(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	u32			amount;
+	int			rc;
+
+	for (bh = common->next_buffhd_to_drain;
+	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
+	     bh = common->next_buffhd_to_drain) {
+
+		/* Throw away the data in a filled buffer */
+		if (bh->state == BUF_STATE_FULL) {
+			smp_rmb();
+			bh->state = BUF_STATE_EMPTY;
+			common->next_buffhd_to_drain = bh->next;
+
+			/* A short packet or an error ends everything */
+			if (bh->outreq->actual < bh->bulk_out_intended_length ||
+			    bh->outreq->status != 0) {
+				raise_exception(common,
+						FSG_STATE_ABORT_BULK_OUT);
+				return -EINTR;
+			}
+			continue;
+		}
+
+		/* Try to submit another request if we need one */
+		bh = common->next_buffhd_to_fill;
+		if (bh->state == BUF_STATE_EMPTY
+		 && common->usb_amount_left > 0) {
+			amount = min(common->usb_amount_left, FSG_BUFLEN);
+
+			/*
+			 * Except at the end of the transfer, amount will be
+			 * equal to the buffer size, which is divisible by
+			 * the bulk-out maxpacket size.
+			 */
+			set_bulk_out_req_length(common, bh, amount);
+			if (!start_out_transfer(common, bh))
+				/* Dunno what to do if common->fsg is NULL */
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			common->usb_amount_left -= amount;
+			continue;
+		}
+
+		/* Otherwise wait for something to happen */
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static int finish_reply(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
+	int			rc = 0;
+
+	switch (common->data_dir) {
+	case DATA_DIR_NONE:
+		break;			/* Nothing to send */
+
+	/*
+	 * If we don't know whether the host wants to read or write,
+	 * this must be CB or CBI with an unknown command.  We mustn't
+	 * try to send or receive any data.  So stall both bulk pipes
+	 * if we can and wait for a reset.
+	 */
+	case DATA_DIR_UNKNOWN:
+		if (!common->can_stall) {
+			/* Nothing */
+		} else if (fsg_is_set(common)) {
+			fsg_set_halt(common->fsg, common->fsg->bulk_out);
+			rc = halt_bulk_in_endpoint(common->fsg);
+		} else {
+			/* Don't know what to do if common->fsg is NULL */
+			rc = -EIO;
+		}
+		break;
+
+	/* All but the last buffer of data must have already been sent */
+	case DATA_DIR_TO_HOST:
+		if (common->data_size == 0) {
+			/* Nothing to send */
+
+		/* Don't know what to do if common->fsg is NULL */
+		} else if (!fsg_is_set(common)) {
+			rc = -EIO;
+
+		/* If there's no residue, simply send the last buffer */
+		} else if (common->residue == 0) {
+			bh->inreq->zero = 0;
+			if (!start_in_transfer(common, bh))
+				return -EIO;
+			common->next_buffhd_to_fill = bh->next;
+
+		/*
+		 * For Bulk-only, mark the end of the data with a short
+		 * packet.  If we are allowed to stall, halt the bulk-in
+		 * endpoint.  (Note: This violates the Bulk-Only Transport
+		 * specification, which requires us to pad the data if we
+		 * don't halt the endpoint.  Presumably nobody will mind.)
+		 */
+		} else {
+			bh->inreq->zero = 1;
+			if (!start_in_transfer(common, bh))
+				rc = -EIO;
+			common->next_buffhd_to_fill = bh->next;
+			if (common->can_stall)
+				rc = halt_bulk_in_endpoint(common->fsg);
+		}
+		break;
+
+	/*
+	 * We have processed all we want from the data the host has sent.
+	 * There may still be outstanding bulk-out requests.
+	 */
+	case DATA_DIR_FROM_HOST:
+		if (common->residue == 0) {
+			/* Nothing to receive */
+
+		/* Did the host stop sending unexpectedly early? */
+		} else if (common->short_packet_received) {
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+
+		/*
+		 * We haven't processed all the incoming data.  Even though
+		 * we may be allowed to stall, doing so would cause a race.
+		 * The controller may already have ACK'ed all the remaining
+		 * bulk-out packets, in which case the host wouldn't see a
+		 * STALL.  Not realizing the endpoint was halted, it wouldn't
+		 * clear the halt -- leading to problems later on.
+		 */
+#if 0
+		} else if (common->can_stall) {
+			if (fsg_is_set(common))
+				fsg_set_halt(common->fsg,
+					     common->fsg->bulk_out);
+			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
+			rc = -EINTR;
+#endif
+
+		/*
+		 * We can't stall.  Read in the excess data and throw it
+		 * all away.
+		 */
+		} else {
+			rc = throw_away_data(common);
+		}
+		break;
+	}
+	return rc;
+}
+
+static int send_status(struct fsg_common *common)
+{
+	struct fsg_lun		*curlun = common->curlun;
+	struct fsg_buffhd	*bh;
+	struct bulk_cs_wrap	*csw;
+	int			rc;
+	u8			status = US_BULK_STAT_OK;
+	u32			sd, sdinfo = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	if (curlun) {
+		sd = curlun->sense_data;
+		sdinfo = curlun->sense_data_info;
+	} else if (common->bad_lun_okay)
+		sd = SS_NO_SENSE;
+	else
+		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
+
+	if (common->phase_error) {
+		DBG(common, "sending phase-error status\n");
+		status = US_BULK_STAT_PHASE;
+		sd = SS_INVALID_COMMAND;
+	} else if (sd != SS_NO_SENSE) {
+		DBG(common, "sending command-failure status\n");
+		status = US_BULK_STAT_FAIL;
+		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
+				"  info x%x\n",
+				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
+	}
+
+	/* Store and send the Bulk-only CSW */
+	csw = (void *)bh->buf;
+
+	csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
+	csw->Tag = common->tag;
+	csw->Residue = cpu_to_le32(common->residue);
+	csw->Status = status;
+
+	bh->inreq->length = US_BULK_CS_WRAP_LEN;
+	bh->inreq->zero = 0;
+	if (!start_in_transfer(common, bh))
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	common->next_buffhd_to_fill = bh->next;
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Check whether the command is properly formed and whether its data size
+ * and direction agree with the values we already have.
+ */
+static int check_command(struct fsg_common *common, int cmnd_size,
+			 enum data_direction data_dir, unsigned int mask,
+			 int needs_medium, const char *name)
+{
+	int			i;
+	int			lun = common->cmnd[1] >> 5;
+	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
+	char			hdlen[20];
+	struct fsg_lun		*curlun;
+
+	hdlen[0] = 0;
+	if (common->data_dir != DATA_DIR_UNKNOWN)
+		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
+			common->data_size);
+	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
+	     name, cmnd_size, dirletter[(int) data_dir],
+	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
+
+	/*
+	 * We can't reply at all until we know the correct data direction
+	 * and size.
+	 */
+	if (common->data_size_from_cmnd == 0)
+		data_dir = DATA_DIR_NONE;
+	if (common->data_size < common->data_size_from_cmnd) {
+		/*
+		 * Host data size < Device data size is a phase error.
+		 * Carry out the command, but only transfer as much as
+		 * we are allowed.
+		 */
+		common->data_size_from_cmnd = common->data_size;
+		common->phase_error = 1;
+	}
+	common->residue = common->data_size;
+	common->usb_amount_left = common->data_size;
+
+	/* Conflicting data directions is a phase error */
+	if (common->data_dir != data_dir && common->data_size_from_cmnd > 0) {
+		common->phase_error = 1;
+		return -EINVAL;
+	}
+
+	/* Verify the length of the command itself */
+	if (cmnd_size != common->cmnd_size) {
+
+		/*
+		 * Special case workaround: There are plenty of buggy SCSI
+		 * implementations. Many have issues with the cbw->Length
+		 * field passing a wrong command size. For those cases we
+		 * always try to work around the problem by using the length
+		 * sent by the host side provided it is at least as large
+		 * as the correct command length.
+		 * Examples of such cases would be MS-Windows, which issues
+		 * REQUEST SENSE with cbw->Length == 12 where it should
+		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
+		 * REQUEST SENSE with cbw->Length == 10 where it should
+		 * be 6 as well.
+		 */
+		if (cmnd_size <= common->cmnd_size) {
+			DBG(common, "%s is buggy! Expected length %d "
+			    "but we got %d\n", name,
+			    cmnd_size, common->cmnd_size);
+			cmnd_size = common->cmnd_size;
+		} else {
+			common->phase_error = 1;
+			return -EINVAL;
+		}
+	}
+
+	/* Check that the LUN values are consistent */
+	if (common->lun != lun)
+		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
+		    common->lun, lun);
+
+	/* Check the LUN */
+	curlun = common->curlun;
+	if (curlun) {
+		if (common->cmnd[0] != REQUEST_SENSE) {
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+	} else {
+		common->bad_lun_okay = 0;
+
+		/*
+		 * INQUIRY and REQUEST SENSE commands are explicitly allowed
+		 * to use unsupported LUNs; all others may not.
+		 */
+		if (common->cmnd[0] != INQUIRY &&
+		    common->cmnd[0] != REQUEST_SENSE) {
+			DBG(common, "unsupported LUN %d\n", common->lun);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * If a unit attention condition exists, only INQUIRY and
+	 * REQUEST SENSE commands are allowed; anything else must fail.
+	 */
+	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
+	    common->cmnd[0] != INQUIRY &&
+	    common->cmnd[0] != REQUEST_SENSE) {
+		curlun->sense_data = curlun->unit_attention_data;
+		curlun->unit_attention_data = SS_NO_SENSE;
+		return -EINVAL;
+	}
+
+	/* Check that only command bytes listed in the mask are non-zero */
+	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
+	for (i = 1; i < cmnd_size; ++i) {
+		if (common->cmnd[i] && !(mask & (1 << i))) {
+			if (curlun)
+				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * If the medium isn't mounted and the command needs to access
+	 * it, return an error.
+	 */
+	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
+		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Wrapper around check_command() for commands whose data size is in blocks */
+static int check_command_size_in_blocks(struct fsg_common *common,
+		int cmnd_size, enum data_direction data_dir,
+		unsigned int mask, int needs_medium, const char *name)
+{
+	if (common->curlun)
+		common->data_size_from_cmnd <<= common->curlun->blkbits;
+	return check_command(common, cmnd_size, data_dir,
+			mask, needs_medium, name);
+}
+
+static int do_scsi_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc;
+	int			reply = -EINVAL;
+	int			i;
+	static char		unknown[16];
+
+	dump_cdb(common);
+
+	/* Wait for the next buffer to become available for data or status */
+	bh = common->next_buffhd_to_fill;
+	common->next_buffhd_to_drain = bh;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+	common->phase_error = 0;
+	common->short_packet_received = 0;
+
+	down_read(&common->filesem);	/* We're using the backing file */
+	switch (common->cmnd[0]) {
+
+	case INQUIRY:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "INQUIRY");
+		if (reply == 0)
+			reply = do_inquiry(common, bh);
+		break;
+
+	case MODE_SELECT:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+				      (1<<1) | (1<<4), 0,
+				      "MODE SELECT(6)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case MODE_SELECT_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+				      (1<<1) | (3<<7), 0,
+				      "MODE SELECT(10)");
+		if (reply == 0)
+			reply = do_mode_select(common, bh);
+		break;
+
+	case MODE_SENSE:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (1<<4), 0,
+				      "MODE SENSE(6)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case MODE_SENSE_10:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (1<<1) | (1<<2) | (3<<7), 0,
+				      "MODE SENSE(10)");
+		if (reply == 0)
+			reply = do_mode_sense(common, bh);
+		break;
+
+	case ALLOW_MEDIUM_REMOVAL:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<4), 0,
+				      "PREVENT-ALLOW MEDIUM REMOVAL");
+		if (reply == 0)
+			reply = do_prevent_allow(common);
+		break;
+
+	case READ_6:
+		i = common->cmnd[4];
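+		/*
+		 * In READ(6) (and WRITE(6) below) a transfer length of zero
+		 * means 256 blocks, per the SCSI spec.
+		 */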
+		common->data_size_from_cmnd = (i == 0) ? 256 : i;
+		reply = check_command_size_in_blocks(common, 6,
+				      DATA_DIR_TO_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "READ(6)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command_size_in_blocks(common, 10,
+				      DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "READ(10)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]);
+		reply = check_command_size_in_blocks(common, 12,
+				      DATA_DIR_TO_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "READ(12)");
+		if (reply == 0)
+			reply = do_read(common);
+		break;
+
+	case READ_CAPACITY:
+		common->data_size_from_cmnd = 8;
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (0xf<<2) | (1<<8), 1,
+				      "READ CAPACITY");
+		if (reply == 0)
+			reply = do_read_capacity(common, bh);
+		break;
+
+	case READ_HEADER:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7) | (0x1f<<1), 1,
+				      "READ HEADER");
+		if (reply == 0)
+			reply = do_read_header(common, bh);
+		break;
+
+	case READ_TOC:
+		if (!common->curlun || !common->curlun->cdrom)
+			goto unknown_cmnd;
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (7<<6) | (1<<1), 1,
+				      "READ TOC");
+		if (reply == 0)
+			reply = do_read_toc(common, bh);
+		break;
+
+	case READ_FORMAT_CAPACITIES:
+		common->data_size_from_cmnd =
+			get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command(common, 10, DATA_DIR_TO_HOST,
+				      (3<<7), 1,
+				      "READ FORMAT CAPACITIES");
+		if (reply == 0)
+			reply = do_read_format_capacities(common, bh);
+		break;
+
+	case REQUEST_SENSE:
+		common->data_size_from_cmnd = common->cmnd[4];
+		reply = check_command(common, 6, DATA_DIR_TO_HOST,
+				      (1<<4), 0,
+				      "REQUEST SENSE");
+		if (reply == 0)
+			reply = do_request_sense(common, bh);
+		break;
+
+	case START_STOP:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				      (1<<1) | (1<<4), 0,
+				      "START-STOP UNIT");
+		if (reply == 0)
+			reply = do_start_stop(common);
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (0xf<<2) | (3<<7), 1,
+				      "SYNCHRONIZE CACHE");
+		if (reply == 0)
+			reply = do_synchronize_cache(common);
+		break;
+
+	case TEST_UNIT_READY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 6, DATA_DIR_NONE,
+				0, 1,
+				"TEST UNIT READY");
+		break;
+
+	/*
+	 * Although optional, this command is used by MS-Windows.  We
+	 * support a minimal version: BytChk must be 0.
+	 */
+	case VERIFY:
+		common->data_size_from_cmnd = 0;
+		reply = check_command(common, 10, DATA_DIR_NONE,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "VERIFY");
+		if (reply == 0)
+			reply = do_verify(common);
+		break;
+
+	case WRITE_6:
+		i = common->cmnd[4];
+		common->data_size_from_cmnd = (i == 0) ? 256 : i;
+		reply = check_command_size_in_blocks(common, 6,
+				      DATA_DIR_FROM_HOST,
+				      (7<<1) | (1<<4), 1,
+				      "WRITE(6)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case WRITE_10:
+		common->data_size_from_cmnd =
+				get_unaligned_be16(&common->cmnd[7]);
+		reply = check_command_size_in_blocks(common, 10,
+				      DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (3<<7), 1,
+				      "WRITE(10)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	case WRITE_12:
+		common->data_size_from_cmnd =
+				get_unaligned_be32(&common->cmnd[6]);
+		reply = check_command_size_in_blocks(common, 12,
+				      DATA_DIR_FROM_HOST,
+				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
+				      "WRITE(12)");
+		if (reply == 0)
+			reply = do_write(common);
+		break;
+
+	/*
+	 * Some mandatory commands that we recognize but don't implement.
+	 * They don't mean much in this setting.  It's left as an exercise
+	 * for anyone interested to implement RESERVE and RELEASE in terms
+	 * of POSIX locks.
+	 */
+	case FORMAT_UNIT:
+	case RELEASE:
+	case RESERVE:
+	case SEND_DIAGNOSTIC:
+		/* Fall through */
+
+	default:
+unknown_cmnd:
+		common->data_size_from_cmnd = 0;
+		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
+		reply = check_command(common, common->cmnd_size,
+				      DATA_DIR_UNKNOWN, ~0, 0, unknown);
+		if (reply == 0) {
+			common->curlun->sense_data = SS_INVALID_COMMAND;
+			reply = -EINVAL;
+		}
+		break;
+	}
+	up_read(&common->filesem);
+
+	if (reply == -EINTR || signal_pending(current))
+		return -EINTR;
+
+	/* Set up the single reply buffer for finish_reply() */
+	if (reply == -EINVAL)
+		reply = 0;		/* Error reply length */
+	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
+		reply = min((u32)reply, common->data_size_from_cmnd);
+		bh->inreq->length = reply;
+		bh->state = BUF_STATE_FULL;
+		common->residue -= reply;
+	}				/* Otherwise it's already set */
+
+	return 0;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
+{
+	struct usb_request	*req = bh->outreq;
+	struct bulk_cb_wrap	*cbw = req->buf;
+	struct fsg_common	*common = fsg->common;
+
+	/* Was this a real packet?  Should it be ignored? */
+	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
+		return -EINVAL;
+
+	/* Is the CBW valid? */
+	if (req->actual != US_BULK_CB_WRAP_LEN ||
+			cbw->Signature != cpu_to_le32(
+				US_BULK_CB_SIGN)) {
+		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
+				req->actual,
+				le32_to_cpu(cbw->Signature));
+
+		/*
+		 * The Bulk-only spec says we MUST stall the IN endpoint
+		 * (6.6.1), so it's unavoidable.  It also says we must
+		 * retain this state until the next reset, but there's
+		 * no way to tell the controller driver it should ignore
+		 * Clear-Feature(HALT) requests.
+		 *
+		 * We aren't required to halt the OUT endpoint; instead
+		 * we can simply accept and discard any data received
+		 * until the next reset.
+		 */
+		wedge_bulk_in_endpoint(fsg);
+		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+		return -EINVAL;
+	}
+
+	/* Is the CBW meaningful? */
+	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
+			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
+		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
+				"cmdlen %u\n",
+				cbw->Lun, cbw->Flags, cbw->Length);
+
+		/*
+		 * We can do anything we want here, so let's stall the
+		 * bulk pipes if we are allowed to.
+		 */
+		if (common->can_stall) {
+			fsg_set_halt(fsg, fsg->bulk_out);
+			halt_bulk_in_endpoint(fsg);
+		}
+		return -EINVAL;
+	}
+
+	/* Save the command for later */
+	common->cmnd_size = cbw->Length;
+	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
+	if (cbw->Flags & US_BULK_FLAG_IN)
+		common->data_dir = DATA_DIR_TO_HOST;
+	else
+		common->data_dir = DATA_DIR_FROM_HOST;
+	common->data_size = le32_to_cpu(cbw->DataTransferLength);
+	if (common->data_size == 0)
+		common->data_dir = DATA_DIR_NONE;
+	common->lun = cbw->Lun;
+	if (common->lun >= 0 && common->lun < common->nluns)
+		common->curlun = &common->luns[common->lun];
+	else
+		common->curlun = NULL;
+	common->tag = cbw->Tag;
+	return 0;
+}
+
+static int get_next_command(struct fsg_common *common)
+{
+	struct fsg_buffhd	*bh;
+	int			rc = 0;
+
+	/* Wait for the next buffer to become available */
+	bh = common->next_buffhd_to_fill;
+	while (bh->state != BUF_STATE_EMPTY) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
+
+	/* Queue a request to read a Bulk-only CBW */
+	set_bulk_out_req_length(common, bh, US_BULK_CB_WRAP_LEN);
+	if (!start_out_transfer(common, bh))
+		/* Don't know what to do if common->fsg is NULL */
+		return -EIO;
+
+	/*
+	 * We will drain the buffer in software, which means we
+	 * can reuse it for the next filling.  No need to advance
+	 * next_buffhd_to_fill.
+	 */
+
+	/* Wait for the CBW to arrive */
+	while (bh->state != BUF_STATE_FULL) {
+		rc = sleep_thread(common);
+		if (rc)
+			return rc;
+	}
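+	/*
+	 * The read barrier below presumably pairs with a write barrier in
+	 * the bulk-out completion handler, so the CBW contents written by
+	 * the controller driver are visible once BUF_STATE_FULL is seen.
+	 */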
+	smp_rmb();
+	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
+	bh->state = BUF_STATE_EMPTY;
+
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
+		struct usb_request **preq)
+{
+	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
+	if (*preq)
+		return 0;
+	ERROR(common, "can't allocate request for %s\n", ep->name);
+	return -ENOMEM;
+}
+
+/* Reset interface setting and re-init endpoint state (toggle etc). */
+static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
+{
+	struct fsg_dev *fsg;
+	int i, rc = 0;
+
+	if (common->running)
+		DBG(common, "reset interface\n");
+
+reset:
+	/* Deallocate the requests */
+	if (common->fsg) {
+		fsg = common->fsg;
+
+		for (i = 0; i < fsg_num_buffers; ++i) {
+			struct fsg_buffhd *bh = &common->buffhds[i];
+
+			if (bh->inreq) {
+				usb_ep_free_request(fsg->bulk_in, bh->inreq);
+				bh->inreq = NULL;
+			}
+			if (bh->outreq) {
+				usb_ep_free_request(fsg->bulk_out, bh->outreq);
+				bh->outreq = NULL;
+			}
+		}
+
+		/* Disable the endpoints */
+		if (fsg->bulk_in_enabled) {
+			usb_ep_disable(fsg->bulk_in);
+			fsg->bulk_in_enabled = 0;
+		}
+		if (fsg->bulk_out_enabled) {
+			usb_ep_disable(fsg->bulk_out);
+			fsg->bulk_out_enabled = 0;
+		}
+
+		common->fsg = NULL;
+		wake_up(&common->fsg_wait);
+	}
+
+	common->running = 0;
+	if (!new_fsg || rc)
+		return rc;
+
+	common->fsg = new_fsg;
+	fsg = common->fsg;
+
+	/* Enable the endpoints */
+	rc = config_ep_by_speed(common->gadget, &(fsg->function), fsg->bulk_in);
+	if (rc)
+		goto reset;
+	rc = usb_ep_enable(fsg->bulk_in);
+	if (rc)
+		goto reset;
+	fsg->bulk_in->driver_data = common;
+	fsg->bulk_in_enabled = 1;
+
+	rc = config_ep_by_speed(common->gadget, &(fsg->function),
+				fsg->bulk_out);
+	if (rc)
+		goto reset;
+	rc = usb_ep_enable(fsg->bulk_out);
+	if (rc)
+		goto reset;
+	fsg->bulk_out->driver_data = common;
+	fsg->bulk_out_enabled = 1;
+	common->bulk_out_maxpacket = usb_endpoint_maxp(fsg->bulk_out->desc);
+	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
+
+	/* Allocate the requests */
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		struct fsg_buffhd	*bh = &common->buffhds[i];
+
+		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
+		if (rc)
+			goto reset;
+		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
+		if (rc)
+			goto reset;
+		bh->inreq->buf = bh->outreq->buf = bh->buf;
+		bh->inreq->context = bh->outreq->context = bh;
+		bh->inreq->complete = bulk_in_complete;
+		bh->outreq->complete = bulk_out_complete;
+	}
+
+	common->running = 1;
+	for (i = 0; i < common->nluns; ++i)
+		common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
+	return rc;
+}
+
+
+/****************************** ALT CONFIGS ******************************/
+
+static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+	fsg->common->new_fsg = fsg;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
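+	/*
+	 * The worker thread performs the actual reconfiguration; returning
+	 * USB_GADGET_DELAYED_STATUS makes the composite core hold off the
+	 * status stage until handle_exception() calls
+	 * usb_composite_setup_continue().
+	 */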
+	return USB_GADGET_DELAYED_STATUS;
+}
+
+static void fsg_disable(struct usb_function *f)
+{
+	struct fsg_dev *fsg = fsg_from_func(f);
+	fsg->common->new_fsg = NULL;
+	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void handle_exception(struct fsg_common *common)
+{
+	siginfo_t		info;
+	int			i;
+	struct fsg_buffhd	*bh;
+	enum fsg_state		old_state;
+	struct fsg_lun		*curlun;
+	unsigned int		exception_req_tag;
+
+	/*
+	 * Clear the existing signals.  Anything but SIGUSR1 is converted
+	 * into a high-priority EXIT exception.
+	 */
+	for (;;) {
+		int sig =
+			dequeue_signal_lock(current, &current->blocked, &info);
+		if (!sig)
+			break;
+		if (sig != SIGUSR1) {
+			if (common->state < FSG_STATE_EXIT)
+				DBG(common, "Main thread exiting on signal\n");
+			raise_exception(common, FSG_STATE_EXIT);
+		}
+	}
+
+	/* Cancel all the pending transfers */
+	if (likely(common->fsg)) {
+		for (i = 0; i < fsg_num_buffers; ++i) {
+			bh = &common->buffhds[i];
+			if (bh->inreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
+			if (bh->outreq_busy)
+				usb_ep_dequeue(common->fsg->bulk_out,
+					       bh->outreq);
+		}
+
+		/* Wait until everything is idle */
+		for (;;) {
+			int num_active = 0;
+			for (i = 0; i < fsg_num_buffers; ++i) {
+				bh = &common->buffhds[i];
+				num_active += bh->inreq_busy + bh->outreq_busy;
+			}
+			if (num_active == 0)
+				break;
+			if (sleep_thread(common))
+				return;
+		}
+
+		/* Clear out the controller's fifos */
+		if (common->fsg->bulk_in_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_in);
+		if (common->fsg->bulk_out_enabled)
+			usb_ep_fifo_flush(common->fsg->bulk_out);
+	}
+
+	/*
+	 * Reset the I/O buffer states and pointers, the SCSI
+	 * state, and the exception.  Then invoke the handler.
+	 */
+	spin_lock_irq(&common->lock);
+
+	for (i = 0; i < fsg_num_buffers; ++i) {
+		bh = &common->buffhds[i];
+		bh->state = BUF_STATE_EMPTY;
+	}
+	common->next_buffhd_to_fill = &common->buffhds[0];
+	common->next_buffhd_to_drain = &common->buffhds[0];
+	exception_req_tag = common->exception_req_tag;
+	old_state = common->state;
+
+	if (old_state == FSG_STATE_ABORT_BULK_OUT)
+		common->state = FSG_STATE_STATUS_PHASE;
+	else {
+		for (i = 0; i < common->nluns; ++i) {
+			curlun = &common->luns[i];
+			curlun->prevent_medium_removal = 0;
+			curlun->sense_data = SS_NO_SENSE;
+			curlun->unit_attention_data = SS_NO_SENSE;
+			curlun->sense_data_info = 0;
+			curlun->info_valid = 0;
+		}
+		common->state = FSG_STATE_IDLE;
+	}
+	spin_unlock_irq(&common->lock);
+
+	/* Carry out any extra actions required for the exception */
+	switch (old_state) {
+	case FSG_STATE_ABORT_BULK_OUT:
+		send_status(common);
+		spin_lock_irq(&common->lock);
+		if (common->state == FSG_STATE_STATUS_PHASE)
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_RESET:
+		/*
+		 * In case we were forced against our will to halt a
+		 * bulk endpoint, clear the halt now.  (The SuperH UDC
+		 * requires this.)
+		 */
+		if (!fsg_is_set(common))
+			break;
+		if (test_and_clear_bit(IGNORE_BULK_OUT,
+				       &common->fsg->atomic_bitflags))
+			usb_ep_clear_halt(common->fsg->bulk_in);
+
+		if (common->ep0_req_tag == exception_req_tag)
+			ep0_queue(common);	/* Complete the status stage */
+
+		/*
+		 * Technically this should go here, but it would only be
+		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
+		 * CONFIG_CHANGE cases.
+		 */
+		/* for (i = 0; i < common->nluns; ++i) */
+		/*	common->luns[i].unit_attention_data = */
+		/*		SS_RESET_OCCURRED;  */
+		break;
+
+	case FSG_STATE_CONFIG_CHANGE:
+		do_set_interface(common, common->new_fsg);
+		if (common->new_fsg)
+			usb_composite_setup_continue(common->cdev);
+		break;
+
+	case FSG_STATE_EXIT:
+	case FSG_STATE_TERMINATED:
+		do_set_interface(common, NULL);		/* Free resources */
+		spin_lock_irq(&common->lock);
+		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
+		spin_unlock_irq(&common->lock);
+		break;
+
+	case FSG_STATE_INTERFACE_CHANGE:
+	case FSG_STATE_DISCONNECT:
+	case FSG_STATE_COMMAND_PHASE:
+	case FSG_STATE_DATA_PHASE:
+	case FSG_STATE_STATUS_PHASE:
+	case FSG_STATE_IDLE:
+		break;
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static int fsg_main_thread(void *common_)
+{
+	struct fsg_common	*common = common_;
+
+	/*
+	 * Allow the thread to be killed by a signal, but set the signal mask
+	 * to block everything but INT, TERM, KILL, and USR1.
+	 */
+	allow_signal(SIGINT);
+	allow_signal(SIGTERM);
+	allow_signal(SIGKILL);
+	allow_signal(SIGUSR1);
+
+	/* Allow the thread to be frozen */
+	set_freezable();
+
+	/*
+	 * Arrange for userspace references to be interpreted as kernel
+	 * pointers.  That way we can pass a kernel pointer to a routine
+	 * that expects a __user pointer and it will work okay.
+	 */
+	set_fs(get_ds());
+
+	/* The main loop */
+	while (common->state != FSG_STATE_TERMINATED) {
+		if (exception_in_progress(common) || signal_pending(current)) {
+			handle_exception(common);
+			continue;
+		}
+
+		if (!common->running) {
+			sleep_thread(common);
+			continue;
+		}
+
+		if (get_next_command(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_DATA_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (do_scsi_command(common) || finish_reply(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_STATUS_PHASE;
+		spin_unlock_irq(&common->lock);
+
+		if (send_status(common))
+			continue;
+
+		spin_lock_irq(&common->lock);
+		if (!exception_in_progress(common))
+			common->state = FSG_STATE_IDLE;
+		spin_unlock_irq(&common->lock);
+	}
+
+	spin_lock_irq(&common->lock);
+	common->thread_task = NULL;
+	spin_unlock_irq(&common->lock);
+
+	if (!common->ops || !common->ops->thread_exits
+	 || common->ops->thread_exits(common) < 0) {
+		struct fsg_lun *curlun = common->luns;
+		unsigned i = common->nluns;
+
+		down_write(&common->filesem);
+		for (; i--; ++curlun) {
+			if (!fsg_lun_is_open(curlun))
+				continue;
+
+			fsg_lun_close(curlun);
+			curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+		}
+		up_write(&common->filesem);
+	}
+
+	/* Let fsg_unbind() know the thread has exited */
+	complete_and_exit(&common->thread_notifier, 0);
+}
+
+
+/*************************** DEVICE ATTRIBUTES ***************************/
+
+static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
+static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
+static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
+
+static struct device_attribute dev_attr_ro_cdrom =
+	__ATTR(ro, 0444, fsg_show_ro, NULL);
+static struct device_attribute dev_attr_file_nonremovable =
+	__ATTR(file, 0444, fsg_show_file, NULL);
+
+
+/****************************** FSG COMMON ******************************/
+
+static void fsg_common_release(struct kref *ref);
+
+static void fsg_lun_release(struct device *dev)
+{
+	/* Nothing needs to be done */
+}
+
+static inline void fsg_common_get(struct fsg_common *common)
+{
+	kref_get(&common->ref);
+}
+
+static inline void fsg_common_put(struct fsg_common *common)
+{
+	kref_put(&common->ref, fsg_common_release);
+}
+
+static struct fsg_common *fsg_common_init(struct fsg_common *common,
+					  struct usb_composite_dev *cdev,
+					  struct fsg_config *cfg)
+{
+	struct usb_gadget *gadget = cdev->gadget;
+	struct fsg_buffhd *bh;
+	struct fsg_lun *curlun;
+	struct fsg_lun_config *lcfg;
+	int nluns, i, rc;
+	char *pathbuf;
+
+	rc = fsg_num_buffers_validate();
+	if (rc != 0)
+		return ERR_PTR(rc);
+
+	/* Find out how many LUNs there should be */
+	nluns = cfg->nluns;
+	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
+		dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Allocate? */
+	if (!common) {
+		common = kzalloc(sizeof *common, GFP_KERNEL);
+		if (!common)
+			return ERR_PTR(-ENOMEM);
+		common->free_storage_on_release = 1;
+	} else {
+		memset(common, 0, sizeof *common);
+		common->free_storage_on_release = 0;
+	}
+
+	common->buffhds = kcalloc(fsg_num_buffers,
+				  sizeof *(common->buffhds), GFP_KERNEL);
+	if (!common->buffhds) {
+		if (common->free_storage_on_release)
+			kfree(common);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	common->ops = cfg->ops;
+	common->private_data = cfg->private_data;
+
+	common->gadget = gadget;
+	common->ep0 = gadget->ep0;
+	common->ep0req = cdev->req;
+	common->cdev = cdev;
+
+	/* Maybe allocate device-global string IDs, and patch descriptors */
+	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
+		rc = usb_string_id(cdev);
+		if (unlikely(rc < 0))
+			goto error_release;
+		fsg_strings[FSG_STRING_INTERFACE].id = rc;
+		fsg_intf_desc.iInterface = rc;
+	}
+
+	/*
+	 * Create the LUNs, open their backing files, and register the
+	 * LUN devices in sysfs.
+	 */
+	curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
+	if (unlikely(!curlun)) {
+		rc = -ENOMEM;
+		goto error_release;
+	}
+	common->luns = curlun;
+
+	init_rwsem(&common->filesem);
+
+	for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
+		curlun->cdrom = !!lcfg->cdrom;
+		curlun->ro = lcfg->cdrom || lcfg->ro;
+		curlun->initially_ro = curlun->ro;
+		curlun->removable = lcfg->removable;
+		curlun->dev.release = fsg_lun_release;
+		curlun->dev.parent = &gadget->dev;
+		/* curlun->dev.driver = &fsg_driver.driver; XXX */
+		dev_set_drvdata(&curlun->dev, &common->filesem);
+		dev_set_name(&curlun->dev, "lun%d", i);
+
+		rc = device_register(&curlun->dev);
+		if (rc) {
+			INFO(common, "failed to register LUN%d: %d\n", i, rc);
+			common->nluns = i;
+			put_device(&curlun->dev);
+			goto error_release;
+		}
+
+		rc = device_create_file(&curlun->dev,
+					curlun->cdrom
+				      ? &dev_attr_ro_cdrom
+				      : &dev_attr_ro);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev,
+					curlun->removable
+				      ? &dev_attr_file
+				      : &dev_attr_file_nonremovable);
+		if (rc)
+			goto error_luns;
+		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
+		if (rc)
+			goto error_luns;
+
+		if (lcfg->filename) {
+			rc = fsg_lun_open(curlun, lcfg->filename);
+			if (rc)
+				goto error_luns;
+		} else if (!curlun->removable) {
+			ERROR(common, "no file given for LUN%d\n", i);
+			rc = -EINVAL;
+			goto error_luns;
+		}
+	}
+	common->nluns = nluns;
+
+	/* Data buffers cyclic list */
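+	/*
+	 * The goto enters the loop past the linking step, so each pass links
+	 * the previous element to the current one and allocates the current
+	 * element's data buffer; the assignment after the loop closes the
+	 * ring back to the first element.
+	 */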
+	bh = common->buffhds;
+	i = fsg_num_buffers;
+	goto buffhds_first_it;
+	do {
+		bh->next = bh + 1;
+		++bh;
+buffhds_first_it:
+		bh->buf = kmalloc(FSG_BUFLEN, GFP_KERNEL);
+		if (unlikely(!bh->buf)) {
+			rc = -ENOMEM;
+			goto error_release;
+		}
+	} while (--i);
+	bh->next = common->buffhds;
+
+	/* Prepare the inquiry string */
+	if (cfg->release != 0xffff) {
+		i = cfg->release;
+	} else {
+		i = usb_gadget_controller_number(gadget);
+		if (i >= 0) {
+			i = 0x0300 + i;
+		} else {
+			WARNING(common, "controller '%s' not recognized\n",
+				gadget->name);
+			i = 0x0399;
+		}
+	}
+	snprintf(common->inquiry_string, sizeof common->inquiry_string,
+		 "%-8s%-16s%04x", cfg->vendor_name ?: "Linux",
+		 /* Assume product name dependent on the first LUN */
+		 cfg->product_name ?: (common->luns->cdrom
+				     ? "File-CD Gadget"
+				     : "File-Stor Gadget"),
+		 i);
+
+	/*
+	 * Some peripheral controllers are known not to be able to
+	 * halt bulk endpoints correctly.  If one of them is present,
+	 * disable stalls.
+	 */
+	common->can_stall = cfg->can_stall &&
+		!(gadget_is_at91(common->gadget));
+
+	spin_lock_init(&common->lock);
+	kref_init(&common->ref);
+
+	/* Tell the thread to start working */
+	common->thread_task =
+		kthread_create(fsg_main_thread, common, "file-storage");
+	if (IS_ERR(common->thread_task)) {
+		rc = PTR_ERR(common->thread_task);
+		goto error_release;
+	}
+	init_completion(&common->thread_notifier);
+	init_waitqueue_head(&common->fsg_wait);
+
+	/* Information */
+	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
+	INFO(common, "Number of LUNs=%d\n", common->nluns);
+
+	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
+	for (i = 0, nluns = common->nluns, curlun = common->luns;
+	     i < nluns;
+	     ++curlun, ++i) {
+		char *p = "(no medium)";
+		if (fsg_lun_is_open(curlun)) {
+			p = "(error)";
+			if (pathbuf) {
+				p = d_path(&curlun->filp->f_path,
+					   pathbuf, PATH_MAX);
+				if (IS_ERR(p))
+					p = "(error)";
+			}
+		}
+		LINFO(curlun, "LUN: %s%s%sfile: %s\n",
+		      curlun->removable ? "removable " : "",
+		      curlun->ro ? "read only " : "",
+		      curlun->cdrom ? "CD-ROM " : "",
+		      p);
+	}
+	kfree(pathbuf);
+
+	DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
+
+	wake_up_process(common->thread_task);
+
+	return common;
+
+error_luns:
+	common->nluns = i + 1;
+error_release:
+	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
+	/* Call fsg_common_release() directly; the ref might not be initialised. */
+	fsg_common_release(&common->ref);
+	return ERR_PTR(rc);
+}
+
+static void fsg_common_release(struct kref *ref)
+{
+	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
+
+	/* If the thread isn't already dead, tell it to exit now */
+	if (common->state != FSG_STATE_TERMINATED) {
+		raise_exception(common, FSG_STATE_EXIT);
+		wait_for_completion(&common->thread_notifier);
+	}
+
+	if (likely(common->luns)) {
+		struct fsg_lun *lun = common->luns;
+		unsigned i = common->nluns;
+
+		/* In error recovery common->nluns may be zero. */
+		for (; i; --i, ++lun) {
+			device_remove_file(&lun->dev, &dev_attr_nofua);
+			device_remove_file(&lun->dev,
+					   lun->cdrom
+					 ? &dev_attr_ro_cdrom
+					 : &dev_attr_ro);
+			device_remove_file(&lun->dev,
+					   lun->removable
+					 ? &dev_attr_file
+					 : &dev_attr_file_nonremovable);
+			fsg_lun_close(lun);
+			device_unregister(&lun->dev);
+		}
+
+		kfree(common->luns);
+	}
+
+	{
+		struct fsg_buffhd *bh = common->buffhds;
+		unsigned i = fsg_num_buffers;
+		do {
+			kfree(bh->buf);
+		} while (++bh, --i);
+	}
+
+	kfree(common->buffhds);
+	if (common->free_storage_on_release)
+		kfree(common);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct fsg_common	*common = fsg->common;
+
+	DBG(fsg, "unbind\n");
+	if (fsg->common->fsg == fsg) {
+		fsg->common->new_fsg = NULL;
+		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
+		/* FIXME: make interruptible or killable somehow? */
+		wait_event(common->fsg_wait, common->fsg != fsg);
+	}
+
+	fsg_common_put(common);
+	usb_free_descriptors(fsg->function.descriptors);
+	usb_free_descriptors(fsg->function.hs_descriptors);
+	usb_free_descriptors(fsg->function.ss_descriptors);
+	kfree(fsg);
+}
+
+static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct fsg_dev		*fsg = fsg_from_func(f);
+	struct usb_gadget	*gadget = c->cdev->gadget;
+	int			i;
+	struct usb_ep		*ep;
+
+	fsg->gadget = gadget;
+
+	/* New interface */
+	i = usb_interface_id(c, f);
+	if (i < 0)
+		return i;
+	fsg_intf_desc.bInterfaceNumber = i;
+	fsg->interface_number = i;
+
+	/* Find all the endpoints we will use */
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_in = ep;
+
+	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
+	if (!ep)
+		goto autoconf_fail;
+	ep->driver_data = fsg->common;	/* claim the endpoint */
+	fsg->bulk_out = ep;
+
+	/* Copy descriptors */
+	f->descriptors = usb_copy_descriptors(fsg_fs_function);
+	if (unlikely(!f->descriptors))
+		return -ENOMEM;
+
+	if (gadget_is_dualspeed(gadget)) {
+		/* Assume endpoint addresses are the same for both speeds */
+		fsg_hs_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_hs_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		f->hs_descriptors = usb_copy_descriptors(fsg_hs_function);
+		if (unlikely(!f->hs_descriptors)) {
+			usb_free_descriptors(f->descriptors);
+			return -ENOMEM;
+		}
+	}
+
+	if (gadget_is_superspeed(gadget)) {
+		unsigned	max_burst;
+
+		/* Calculate bMaxBurst, we know packet size is 1024 */
+		max_burst = min_t(unsigned, FSG_BUFLEN / 1024, 15);
+
+		fsg_ss_bulk_in_desc.bEndpointAddress =
+			fsg_fs_bulk_in_desc.bEndpointAddress;
+		fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
+
+		fsg_ss_bulk_out_desc.bEndpointAddress =
+			fsg_fs_bulk_out_desc.bEndpointAddress;
+		fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
+
+		f->ss_descriptors = usb_copy_descriptors(fsg_ss_function);
+		if (unlikely(!f->ss_descriptors)) {
+			usb_free_descriptors(f->hs_descriptors);
+			usb_free_descriptors(f->descriptors);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+
+autoconf_fail:
+	ERROR(fsg, "unable to autoconfigure all endpoints\n");
+	return -ENOTSUPP;
+}
+
+
+/****************************** ADD FUNCTION ******************************/
+
+static struct usb_gadget_strings *fsg_strings_array[] = {
+	&fsg_stringtab,
+	NULL,
+};
+
+static int fsg_bind_config(struct usb_composite_dev *cdev,
+			   struct usb_configuration *c,
+			   struct fsg_common *common)
+{
+	struct fsg_dev *fsg;
+	int rc;
+
+	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
+	if (unlikely(!fsg))
+		return -ENOMEM;
+
+	fsg->function.name        = FSG_DRIVER_DESC;
+	fsg->function.strings     = fsg_strings_array;
+	fsg->function.bind        = fsg_bind;
+	fsg->function.unbind      = fsg_unbind;
+	fsg->function.setup       = fsg_setup;
+	fsg->function.set_alt     = fsg_set_alt;
+	fsg->function.disable     = fsg_disable;
+
+	fsg->common               = common;
+	/*
+	 * Our caller holds a reference to the common structure, so we
+	 * don't have to worry about it being freed until we return
+	 * from this function.  So instead of incrementing the counter
+	 * now and decrementing it in error recovery, we increment it
+	 * only when the call to usb_add_function() succeeds.
+	 */
+
+	rc = usb_add_function(c, &fsg->function);
+	if (unlikely(rc))
+		kfree(fsg);
+	else
+		fsg_common_get(fsg->common);
+	return rc;
+}
+
+
+/************************* Module parameters *************************/
+
+struct fsg_module_parameters {
+	char		*file[FSG_MAX_LUNS];
+	bool		ro[FSG_MAX_LUNS];
+	bool		removable[FSG_MAX_LUNS];
+	bool		cdrom[FSG_MAX_LUNS];
+	bool		nofua[FSG_MAX_LUNS];
+
+	unsigned int	file_count, ro_count, removable_count, cdrom_count;
+	unsigned int	nofua_count;
+	unsigned int	luns;	/* nluns */
+	bool		stall;	/* can_stall */
+};
+
+#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)	\
+	module_param_array_named(prefix ## name, params.name, type,	\
+				 &prefix ## params.name ## _count,	\
+				 S_IRUGO);				\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define _FSG_MODULE_PARAM(prefix, params, name, type, desc)		\
+	module_param_named(prefix ## name, params.name, type,		\
+			   S_IRUGO);					\
+	MODULE_PARM_DESC(prefix ## name, desc)
+
+#define FSG_MODULE_PARAMETERS(prefix, params)				\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp,		\
+				"names of backing files or devices");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool,		\
+				"true to force read-only");		\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool,	\
+				"true to simulate removable media");	\
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool,		\
+				"true to simulate CD-ROM instead of disk"); \
+	_FSG_MODULE_PARAM_ARRAY(prefix, params, nofua, bool,		\
+				"true to ignore SCSI WRITE(10,12) FUA bit"); \
+	_FSG_MODULE_PARAM(prefix, params, luns, uint,			\
+			  "number of LUNs");				\
+	_FSG_MODULE_PARAM(prefix, params, stall, bool,			\
+			  "false to prevent bulk stalls")
+
+static void
+fsg_config_from_params(struct fsg_config *cfg,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_lun_config *lun;
+	unsigned i;
+
+	/* Configure LUNs */
+	cfg->nluns =
+		min(params->luns ?: (params->file_count ?: 1u),
+		    (unsigned)FSG_MAX_LUNS);
+	for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
+		lun->ro = !!params->ro[i];
+		lun->cdrom = !!params->cdrom[i];
+		lun->removable = !!params->removable[i];
+		lun->filename =
+			params->file_count > i && params->file[i][0]
+			? params->file[i]
+			: NULL;
+	}
+
+	/* Let MSF use defaults */
+	cfg->vendor_name = NULL;
+	cfg->product_name = NULL;
+	cfg->release = 0xffff;
+
+	cfg->ops = NULL;
+	cfg->private_data = NULL;
+
+	/* Finalise */
+	cfg->can_stall = params->stall;
+}
+
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+	__attribute__((unused));
+static inline struct fsg_common *
+fsg_common_from_params(struct fsg_common *common,
+		       struct usb_composite_dev *cdev,
+		       const struct fsg_module_parameters *params)
+{
+	struct fsg_config cfg;
+	fsg_config_from_params(&cfg, params);
+	return fsg_common_init(common, cdev, &cfg);
+}
diff --git a/drivers/staging/ccg/f_rndis.c b/drivers/staging/ccg/f_rndis.c
new file mode 100644
index 0000000..b1681e4
--- /dev/null
+++ b/drivers/staging/ccg/f_rndis.c
@@ -0,0 +1,918 @@
+/*
+ * f_rndis.c -- RNDIS link function driver
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ * Copyright (C) 2009 Samsung Electronics
+ *                    Author: Michal Nazarewicz (mina86 at mina86.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+
+#include <linux/atomic.h>
+
+#include "u_ether.h"
+#include "rndis.h"
+
+
+/*
+ * This function is an RNDIS Ethernet port -- a Microsoft protocol that's
+ * been promoted instead of the standard CDC Ethernet.  The published RNDIS
+ * spec is ambiguous, incomplete, and needlessly complex.  Variants such as
+ * ActiveSync have even worse status in terms of specification.
+ *
+ * In short:  it's a protocol controlled by (and for) Microsoft, not for an
+ * Open ecosystem or markets.  Linux supports it *only* because Microsoft
+ * doesn't support the CDC Ethernet standard.
+ *
+ * The RNDIS data transfer model is complex, with multiple Ethernet packets
+ * per USB message, and out of band data.  The control model is built around
+ * what's essentially an "RNDIS RPC" protocol.  It's all wrapped in a CDC ACM
+ * (modem, not Ethernet) veneer, with those ACM descriptors being entirely
+ * useless (they're ignored).  RNDIS expects to be the only function in its
+ * configuration, so it's no real help if you need composite devices; and
+ * it expects to be the first configuration too.
+ *
+ * There is a single technical advantage of RNDIS over CDC Ethernet, if you
+ * discount the fluff that its RPC can be made to deliver: it doesn't need
+ * a NOP altsetting for the data interface.  That lets it work on some of the
+ * "so smart it's stupid" hardware which takes over configuration changes
+ * from the software, and adds restrictions like "no altsettings".
+ *
+ * Unfortunately MSFT's RNDIS drivers are buggy.  They hang or oops, and
+ * have all sorts of contrary-to-specification oddities that can prevent
+ * them from working sanely.  Since bugfixes (or accurate specs, letting
+ * Linux work around those bugs) are unlikely to ever come from MSFT, you
+ * may want to avoid using RNDIS on purely operational grounds.
+ *
+ * Omissions from the RNDIS 1.0 specification include:
+ *
+ *   - Power management ... references data that's scattered around lots
+ *     of other documentation, which is incorrect/incomplete there too.
+ *
+ *   - There are various undocumented protocol requirements, like the need
+ *     to send garbage in some control-OUT messages.
+ *
+ *   - MS-Windows drivers sometimes emit undocumented requests.
+ */
+
+struct f_rndis {
+	struct gether			port;
+	u8				ctrl_id, data_id;
+	u8				ethaddr[ETH_ALEN];
+	u32				vendorID;
+	const char			*manufacturer;
+	int				config;
+
+	struct usb_ep			*notify;
+	struct usb_request		*notify_req;
+	atomic_t			notify_count;
+};
+
+static inline struct f_rndis *func_to_rndis(struct usb_function *f)
+{
+	return container_of(f, struct f_rndis, port.func);
+}
+
+/* peak (theoretical) bulk transfer rate in bits-per-second */
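+/*
+ * The factors are packets per (micro)frame * bytes per packet *
+ * (micro)frames per millisecond * 1000 * bits per byte; the SuperSpeed
+ * figure simply mirrors the high-speed formula with 1024-byte packets,
+ * so treat it as a rough estimate.
+ */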
+static unsigned int bitrate(struct usb_gadget *g)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return 13 * 1024 * 8 * 1000 * 8;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return 13 * 512 * 8 * 1000 * 8;
+	else
+		return 19 * 64 * 1 * 1000 * 8;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ */
+
+#define LOG2_STATUS_INTERVAL_MSEC	5	/* 1 << 5 == 32 msec */
+#define STATUS_BYTECOUNT		8	/* 8 bytes data */
+
+
+/* interface descriptor: */
+
+static struct usb_interface_descriptor rndis_control_intf = {
+	.bLength =		sizeof rndis_control_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	/* status endpoint is optional; this could be patched later */
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_COMM,
+	.bInterfaceSubClass =   USB_CDC_SUBCLASS_ACM,
+	.bInterfaceProtocol =   USB_CDC_ACM_PROTO_VENDOR,
+	/* .iInterface = DYNAMIC */
+};
+
+static struct usb_cdc_header_desc header_desc = {
+	.bLength =		sizeof header_desc,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_HEADER_TYPE,
+
+	.bcdCDC =		cpu_to_le16(0x0110),
+};
+
+static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor = {
+	.bLength =		sizeof call_mgmt_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_CALL_MANAGEMENT_TYPE,
+
+	.bmCapabilities =	0x00,
+	.bDataInterface =	0x01,
+};
+
+static struct usb_cdc_acm_descriptor rndis_acm_descriptor = {
+	.bLength =		sizeof rndis_acm_descriptor,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_ACM_TYPE,
+
+	.bmCapabilities =	0x00,
+};
+
+static struct usb_cdc_union_desc rndis_union_desc = {
+	.bLength =		sizeof(rndis_union_desc),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubType =	USB_CDC_UNION_TYPE,
+	/* .bMasterInterface0 =	DYNAMIC */
+	/* .bSlaveInterface0 =	DYNAMIC */
+};
+
+/* the data interface has two bulk endpoints */
+
+static struct usb_interface_descriptor rndis_data_intf = {
+	.bLength =		sizeof rndis_data_intf,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	/* .bInterfaceNumber = DYNAMIC */
+	.bNumEndpoints =	2,
+	.bInterfaceClass =	USB_CLASS_CDC_DATA,
+	.bInterfaceSubClass =	0,
+	.bInterfaceProtocol =	0,
+	/* .iInterface = DYNAMIC */
+};
+
+
+static struct usb_interface_assoc_descriptor
+rndis_iad_descriptor = {
+	.bLength =		sizeof rndis_iad_descriptor,
+	.bDescriptorType =	USB_DT_INTERFACE_ASSOCIATION,
+
+	.bFirstInterface =	0, /* XXX, hardcoded */
+	.bInterfaceCount =	2,	/* control + data */
+	.bFunctionClass =	USB_CLASS_COMM,
+	.bFunctionSubClass =	USB_CDC_SUBCLASS_ETHERNET,
+	.bFunctionProtocol =	USB_CDC_PROTO_NONE,
+	/* .iFunction = DYNAMIC */
+};
+
+/* full speed support: */
+
+static struct usb_endpoint_descriptor fs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
+	.bInterval =		1 << LOG2_STATUS_INTERVAL_MSEC,
+};
+
+static struct usb_endpoint_descriptor fs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor fs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *eth_fs_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &fs_notify_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &fs_in_desc,
+	(struct usb_descriptor_header *) &fs_out_desc,
+	NULL,
+};
+
+/* high speed support: */
+
+static struct usb_endpoint_descriptor hs_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
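+	/*
+	 * At high speed bInterval is an exponent: the period is
+	 * 2^(bInterval-1) microframes of 125 us, so 5 + 4 = 9 yields the
+	 * same 32 ms polling interval as the full-speed descriptor above.
+	 * The SuperSpeed notify descriptor below uses the same encoding.
+	 */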
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_endpoint_descriptor hs_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor hs_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_descriptor_header *eth_hs_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &hs_notify_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &hs_in_desc,
+	(struct usb_descriptor_header *) &hs_out_desc,
+	NULL,
+};
+
+/* super speed support: */
+
+static struct usb_endpoint_descriptor ss_notify_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(STATUS_BYTECOUNT),
+	.bInterval =		LOG2_STATUS_INTERVAL_MSEC + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ss_intr_comp_desc = {
+	.bLength =		sizeof ss_intr_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 3 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+	.wBytesPerInterval =	cpu_to_le16(STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ss_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor ss_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+	.bLength =		sizeof ss_bulk_comp_desc,
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *eth_ss_function[] = {
+	(struct usb_descriptor_header *) &rndis_iad_descriptor,
+
+	/* control interface matches ACM, not Ethernet */
+	(struct usb_descriptor_header *) &rndis_control_intf,
+	(struct usb_descriptor_header *) &header_desc,
+	(struct usb_descriptor_header *) &call_mgmt_descriptor,
+	(struct usb_descriptor_header *) &rndis_acm_descriptor,
+	(struct usb_descriptor_header *) &rndis_union_desc,
+	(struct usb_descriptor_header *) &ss_notify_desc,
+	(struct usb_descriptor_header *) &ss_intr_comp_desc,
+
+	/* data interface has no altsetting */
+	(struct usb_descriptor_header *) &rndis_data_intf,
+	(struct usb_descriptor_header *) &ss_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
+	(struct usb_descriptor_header *) &ss_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_comp_desc,
+	NULL,
+};
+
+/* string descriptors: */
+
+static struct usb_string rndis_string_defs[] = {
+	[0].s = "RNDIS Communications Control",
+	[1].s = "RNDIS Ethernet Data",
+	[2].s = "RNDIS",
+	{  } /* end of list */
+};
+
+static struct usb_gadget_strings rndis_string_table = {
+	.language =		0x0409,	/* en-us */
+	.strings =		rndis_string_defs,
+};
+
+static struct usb_gadget_strings *rndis_strings[] = {
+	&rndis_string_table,
+	NULL,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct sk_buff *rndis_add_header(struct gether *port,
+					struct sk_buff *skb)
+{
+	struct sk_buff *skb2;
+
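+	/*
+	 * Make room for the RNDIS packet header and prepend it; the
+	 * original skb is consumed either way, and NULL is returned if the
+	 * headroom reallocation fails.
+	 */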
+	skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
+	if (skb2)
+		rndis_add_hdr(skb2);
+
+	dev_kfree_skb_any(skb);
+	return skb2;
+}
+
+static void rndis_response_available(void *_rndis)
+{
+	struct f_rndis			*rndis = _rndis;
+	struct usb_request		*req = rndis->notify_req;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	__le32				*data = req->buf;
+	int				status;
+
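+	/*
+	 * If a notification is already queued, rndis_response_complete()
+	 * will keep re-queueing it until notify_count drops back to zero,
+	 * so only the first response needs to start a transfer here.
+	 */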
+	if (atomic_inc_return(&rndis->notify_count) != 1)
+		return;
+
+	/* Send RNDIS RESPONSE_AVAILABLE notification; a
+	 * USB_CDC_NOTIFY_RESPONSE_AVAILABLE "should" work too
+	 *
+	 * This is the only notification defined by RNDIS.
+	 */
+	data[0] = cpu_to_le32(1);
+	data[1] = cpu_to_le32(0);
+
+	status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+	if (status) {
+		atomic_dec(&rndis->notify_count);
+		DBG(cdev, "notify/0 --> %d\n", status);
+	}
+}
+
+static void rndis_response_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	int				status = req->status;
+
+	/* after TX:
+	 *  - USB_CDC_GET_ENCAPSULATED_RESPONSE (ep0/control)
+	 *  - RNDIS_RESPONSE_AVAILABLE (status/irq)
+	 */
+	switch (status) {
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+		/* connection gone */
+		atomic_set(&rndis->notify_count, 0);
+		break;
+	default:
+		DBG(cdev, "RNDIS %s response error %d, %d/%d\n",
+			ep->name, status,
+			req->actual, req->length);
+		/* FALLTHROUGH */
+	case 0:
+		if (ep != rndis->notify)
+			break;
+
+		/* handle multiple pending RNDIS_RESPONSE_AVAILABLE
+		 * notifications by resending until we're done
+		 */
+		if (atomic_dec_and_test(&rndis->notify_count))
+			break;
+		status = usb_ep_queue(rndis->notify, req, GFP_ATOMIC);
+		if (status) {
+			atomic_dec(&rndis->notify_count);
+			DBG(cdev, "notify/1 --> %d\n", status);
+		}
+		break;
+	}
+}
+
+static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
+	int				status;
+
+	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
+//	spin_lock(&dev->lock);
+	status = rndis_msg_parser(rndis->config, (u8 *) req->buf);
+	if (status < 0)
+		ERROR(cdev, "RNDIS command error %d, %d/%d\n",
+			status, req->actual, req->length);
+//	spin_unlock(&dev->lock);
+}
+
+static int
+rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request	*req = cdev->req;
+	int			value = -EOPNOTSUPP;
+	u16			w_index = le16_to_cpu(ctrl->wIndex);
+	u16			w_value = le16_to_cpu(ctrl->wValue);
+	u16			w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything except
+	 * CDC class messages; interface activation uses set_alt().
+	 */
+	switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
+
+	/* RNDIS uses the CDC command encapsulation mechanism to implement
+	 * an RPC scheme, with much getting/setting of attributes by OID.
+	 */
+	case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_SEND_ENCAPSULATED_COMMAND:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		/* read the request; process it later */
+		value = w_length;
+		req->complete = rndis_command_complete;
+		req->context = rndis;
+		/* later, rndis_response_available() sends a notification */
+		break;
+
+	case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8)
+			| USB_CDC_GET_ENCAPSULATED_RESPONSE:
+		if (w_value || w_index != rndis->ctrl_id)
+			goto invalid;
+		else {
+			u8 *buf;
+			u32 n;
+
+			/* return the result */
+			buf = rndis_get_next_response(rndis->config, &n);
+			if (buf) {
+				memcpy(req->buf, buf, n);
+				req->complete = rndis_response_complete;
+				req->context = rndis;
+				rndis_free_response(rndis->config, buf);
+				value = n;
+			}
+			/* else stalls ... spec says to avoid that */
+		}
+		break;
+
+	default:
+invalid:
+		VDBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
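+		/*
+		 * Request a zero-length packet when we return less data than
+		 * the host asked for and the reply happens to be an exact
+		 * multiple of the endpoint's packet size.
+		 */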
+		req->zero = (value < w_length);
+		req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "rndis response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+
+static int rndis_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	/* we know alt == 0 */
+
+	if (intf == rndis->ctrl_id) {
+		if (rndis->notify->driver_data) {
+			VDBG(cdev, "reset rndis control %d\n", intf);
+			usb_ep_disable(rndis->notify);
+		}
+		if (!rndis->notify->desc) {
+			VDBG(cdev, "init rndis ctrl %d\n", intf);
+			if (config_ep_by_speed(cdev->gadget, f, rndis->notify))
+				goto fail;
+		}
+		usb_ep_enable(rndis->notify);
+		rndis->notify->driver_data = rndis;
+
+	} else if (intf == rndis->data_id) {
+		struct net_device	*net;
+
+		if (rndis->port.in_ep->driver_data) {
+			DBG(cdev, "reset rndis\n");
+			gether_disconnect(&rndis->port);
+		}
+
+		if (!rndis->port.in_ep->desc || !rndis->port.out_ep->desc) {
+			DBG(cdev, "init rndis\n");
+			if (config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.in_ep) ||
+			    config_ep_by_speed(cdev->gadget, f,
+					       rndis->port.out_ep)) {
+				rndis->port.in_ep->desc = NULL;
+				rndis->port.out_ep->desc = NULL;
+				goto fail;
+			}
+		}
+
+		/* Avoid ZLPs; they can be troublesome. */
+		rndis->port.is_zlp_ok = false;
+
+		/* RNDIS should be in the "RNDIS uninitialized" state,
+		 * either never activated or after rndis_uninit().
+		 *
+		 * We don't want data to flow here until a nonzero packet
+		 * filter is set, at which point it enters "RNDIS data
+		 * initialized" state ... but we do want the endpoints
+		 * to be activated.  It's a strange little state.
+		 *
+		 * REVISIT the RNDIS gadget code has done this wrong for a
+		 * very long time.  We need another call to the link layer
+		 * code -- gether_updown(...bool) maybe -- to do it right.
+		 */
+		rndis->port.cdc_filter = 0;
+
+		DBG(cdev, "RNDIS RX/TX early activation ... \n");
+		net = gether_connect(&rndis->port);
+		if (IS_ERR(net))
+			return PTR_ERR(net);
+
+		rndis_set_param_dev(rndis->config, net,
+				&rndis->port.cdc_filter);
+	} else
+		goto fail;
+
+	return 0;
+fail:
+	return -EINVAL;
+}
+
+static void rndis_disable(struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+
+	if (!rndis->notify->driver_data)
+		return;
+
+	DBG(cdev, "rndis deactivated\n");
+
+	rndis_uninit(rndis->config);
+	gether_disconnect(&rndis->port);
+
+	usb_ep_disable(rndis->notify);
+	rndis->notify->driver_data = NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * This isn't quite the same mechanism as CDC Ethernet, since the
+ * notification scheme passes less data, but the same set of link
+ * states must be tested.  A key difference is that altsettings are
+ * not used to tell whether the link should send packets or not.
+ */
+
+static void rndis_open(struct gether *geth)
+{
+	struct f_rndis		*rndis = func_to_rndis(&geth->func);
+	struct usb_composite_dev *cdev = geth->func.config->cdev;
+
+	DBG(cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3,
+				bitrate(cdev->gadget) / 100);
+	rndis_signal_connect(rndis->config);
+}
+
+static void rndis_close(struct gether *geth)
+{
+	struct f_rndis		*rndis = func_to_rndis(&geth->func);
+
+	DBG(geth->func.config->cdev, "%s\n", __func__);
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+	rndis_signal_disconnect(rndis->config);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* ethernet function driver setup/binding */
+
+static int
+rndis_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct f_rndis		*rndis = func_to_rndis(f);
+	int			status;
+	struct usb_ep		*ep;
+
+	/* allocate instance-specific interface IDs */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->ctrl_id = status;
+	rndis_iad_descriptor.bFirstInterface = status;
+
+	rndis_control_intf.bInterfaceNumber = status;
+	rndis_union_desc.bMasterInterface0 = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	rndis->data_id = status;
+
+	rndis_data_intf.bInterfaceNumber = status;
+	rndis_union_desc.bSlaveInterface0 = status;
+
+	status = -ENODEV;
+
+	/* allocate instance-specific endpoints */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_in_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.in_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_out_desc);
+	if (!ep)
+		goto fail;
+	rndis->port.out_ep = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	/* NOTE:  a status/notification endpoint is, strictly speaking,
+	 * optional.  We don't treat it that way though!  It's simpler,
+	 * and some newer profiles don't treat it as optional.
+	 */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_notify_desc);
+	if (!ep)
+		goto fail;
+	rndis->notify = ep;
+	ep->driver_data = cdev;	/* claim */
+
+	status = -ENOMEM;
+
+	/* allocate notification request and buffer */
+	rndis->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!rndis->notify_req)
+		goto fail;
+	rndis->notify_req->buf = kmalloc(STATUS_BYTECOUNT, GFP_KERNEL);
+	if (!rndis->notify_req->buf)
+		goto fail;
+	rndis->notify_req->length = STATUS_BYTECOUNT;
+	rndis->notify_req->context = rndis;
+	rndis->notify_req->complete = rndis_response_complete;
+
+	/* copy descriptors, and track endpoint copies */
+	f->descriptors = usb_copy_descriptors(eth_fs_function);
+	if (!f->descriptors)
+		goto fail;
+
+	/* support all relevant hardware speeds... we expect that when
+	 * hardware is dual speed, all bulk-capable endpoints work at
+	 * both speeds
+	 */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_in_desc.bEndpointAddress =
+				fs_in_desc.bEndpointAddress;
+		hs_out_desc.bEndpointAddress =
+				fs_out_desc.bEndpointAddress;
+		hs_notify_desc.bEndpointAddress =
+				fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(eth_hs_function);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_in_desc.bEndpointAddress =
+				fs_in_desc.bEndpointAddress;
+		ss_out_desc.bEndpointAddress =
+				fs_out_desc.bEndpointAddress;
+		ss_notify_desc.bEndpointAddress =
+				fs_notify_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(eth_ss_function);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	rndis->port.open = rndis_open;
+	rndis->port.close = rndis_close;
+
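+	/* rndis_register() returns the per-config parameter slot on
+	 * success (or a negative errno), so keep it as this instance's
+	 * config id
+	 */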
+	status = rndis_register(rndis_response_available, rndis);
+	if (status < 0)
+		goto fail;
+	rndis->config = status;
+
+	rndis_set_param_medium(rndis->config, RNDIS_MEDIUM_802_3, 0);
+	rndis_set_host_mac(rndis->config, rndis->ethaddr);
+
+	if (rndis->manufacturer && rndis->vendorID &&
+			rndis_set_param_vendor(rndis->config, rndis->vendorID,
+					       rndis->manufacturer))
+		goto fail;
+
+	/* NOTE:  all that is done without knowing or caring about
+	 * the network link ... which is unavailable to this code
+	 * until we're activated via set_alt().
+	 */
+
+	DBG(cdev, "RNDIS: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+			gadget_is_superspeed(c->cdev->gadget) ? "super" :
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			rndis->port.in_ep->name, rndis->port.out_ep->name,
+			rndis->notify->name);
+	return 0;
+
+fail:
+	if (gadget_is_superspeed(c->cdev->gadget) && f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget) && f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->descriptors)
+		usb_free_descriptors(f->descriptors);
+
+	if (rndis->notify_req) {
+		kfree(rndis->notify_req->buf);
+		usb_ep_free_request(rndis->notify, rndis->notify_req);
+	}
+
+	/* we might as well release our claims on endpoints */
+	if (rndis->notify)
+		rndis->notify->driver_data = NULL;
+	if (rndis->port.out_ep->desc)
+		rndis->port.out_ep->driver_data = NULL;
+	if (rndis->port.in_ep->desc)
+		rndis->port.in_ep->driver_data = NULL;
+
+	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
+
+	return status;
+}
+
+static void
+rndis_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct f_rndis		*rndis = func_to_rndis(f);
+
+	rndis_deregister(rndis->config);
+	rndis_exit();
+	rndis_string_defs[0].id = 0;
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+	usb_free_descriptors(f->descriptors);
+
+	kfree(rndis->notify_req->buf);
+	usb_ep_free_request(rndis->notify, rndis->notify_req);
+
+	kfree(rndis);
+}
+
+/* Some controllers can't support RNDIS ... */
+static inline bool can_support_rndis(struct usb_configuration *c)
+{
+	/* everything else is *presumably* fine */
+	return true;
+}
+
+int
+rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
+{
+	struct f_rndis	*rndis;
+	int		status;
+
+	if (!can_support_rndis(c) || !ethaddr)
+		return -EINVAL;
+
+	/* maybe allocate device-global string IDs */
+	if (rndis_string_defs[0].id == 0) {
+
+		/* ... and setup RNDIS itself */
+		status = rndis_init();
+		if (status < 0)
+			return status;
+
+		/* control interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[0].id = status;
+		rndis_control_intf.iInterface = status;
+
+		/* data interface label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[1].id = status;
+		rndis_data_intf.iInterface = status;
+
+		/* IAD iFunction label */
+		status = usb_string_id(c->cdev);
+		if (status < 0)
+			return status;
+		rndis_string_defs[2].id = status;
+		rndis_iad_descriptor.iFunction = status;
+	}
+
+	/* allocate and initialize one new instance */
+	status = -ENOMEM;
+	rndis = kzalloc(sizeof *rndis, GFP_KERNEL);
+	if (!rndis)
+		goto fail;
+
+	memcpy(rndis->ethaddr, ethaddr, ETH_ALEN);
+	rndis->vendorID = vendorID;
+	rndis->manufacturer = manufacturer;
+
+	/* RNDIS activates when the host changes this filter */
+	rndis->port.cdc_filter = 0;
+
+	/* RNDIS has special (and complex) framing */
+	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
+	rndis->port.wrap = rndis_add_header;
+	rndis->port.unwrap = rndis_rm_hdr;
+
+	rndis->port.func.name = "rndis";
+	rndis->port.func.strings = rndis_strings;
+	/* descriptors are per-instance copies */
+	rndis->port.func.bind = rndis_bind;
+	rndis->port.func.unbind = rndis_unbind;
+	rndis->port.func.set_alt = rndis_set_alt;
+	rndis->port.func.setup = rndis_setup;
+	rndis->port.func.disable = rndis_disable;
+
+	status = usb_add_function(c, &rndis->port.func);
+	if (status) {
+		kfree(rndis);
+fail:
+		rndis_exit();
+	}
+	return status;
+}
diff --git a/drivers/staging/ccg/gadget_chips.h b/drivers/staging/ccg/gadget_chips.h
new file mode 100644
index 0000000..0ccca58
--- /dev/null
+++ b/drivers/staging/ccg/gadget_chips.h
@@ -0,0 +1,150 @@
+/*
+ * USB device controllers have lots of quirks.  Use these macros in
+ * gadget drivers or other code that needs to deal with them, and which
+ * autoconfigures instead of using early binding to the hardware.
+ *
+ * This SHOULD eventually work like the ARM mach_is_*() stuff, driven by
+ * some config file that gets updated as new hardware is supported.
+ * (And avoiding all runtime comparisons in typical one-choice configs!)
+ *
+ * NOTE:  some of these controller drivers may not be available yet.
+ * Some are available on 2.4 kernels; several are available, but not
+ * yet pushed in the 2.6 mainline tree.
+ */
+
+#ifndef __GADGET_CHIPS_H
+#define __GADGET_CHIPS_H
+
+/*
+ * NOTICE: the entries below are alphabetical and should be kept
+ * that way.
+ *
+ * Always be sure to add new entries to the correct position or
+ * accept the bashing later.
+ *
+ * If you have forgotten the alphabetical order let VIM/EMACS
+ * do that for you.
+ */
+#define gadget_is_amd5536udc(g)		(!strcmp("amd5536udc", (g)->name))
+#define gadget_is_at91(g)		(!strcmp("at91_udc", (g)->name))
+#define gadget_is_atmel_usba(g)		(!strcmp("atmel_usba_udc", (g)->name))
+#define gadget_is_bcm63xx(g)		(!strcmp("bcm63xx_udc", (g)->name))
+#define gadget_is_ci13xxx_msm(g)	(!strcmp("ci13xxx_msm", (g)->name))
+#define gadget_is_ci13xxx_pci(g)	(!strcmp("ci13xxx_pci", (g)->name))
+#define gadget_is_dummy(g)		(!strcmp("dummy_udc", (g)->name))
+#define gadget_is_dwc3(g)		(!strcmp("dwc3-gadget", (g)->name))
+#define gadget_is_fsl_qe(g)		(!strcmp("fsl_qe_udc", (g)->name))
+#define gadget_is_fsl_usb2(g)		(!strcmp("fsl-usb2-udc", (g)->name))
+#define gadget_is_goku(g)		(!strcmp("goku_udc", (g)->name))
+#define gadget_is_imx(g)		(!strcmp("imx_udc", (g)->name))
+#define gadget_is_langwell(g)		(!strcmp("langwell_udc", (g)->name))
+#define gadget_is_lpc32xx(g)		(!strcmp("lpc32xx_udc", (g)->name))
+#define gadget_is_m66592(g)		(!strcmp("m66592_udc", (g)->name))
+#define gadget_is_musbhdrc(g)		(!strcmp("musb-hdrc", (g)->name))
+#define gadget_is_net2272(g)		(!strcmp("net2272", (g)->name))
+#define gadget_is_net2280(g)		(!strcmp("net2280", (g)->name))
+#define gadget_is_omap(g)		(!strcmp("omap_udc", (g)->name))
+#define gadget_is_pch(g)		(!strcmp("pch_udc", (g)->name))
+#define gadget_is_pxa(g)		(!strcmp("pxa25x_udc", (g)->name))
+#define gadget_is_pxa27x(g)		(!strcmp("pxa27x_udc", (g)->name))
+#define gadget_is_r8a66597(g)		(!strcmp("r8a66597_udc", (g)->name))
+#define gadget_is_renesas_usbhs(g)	(!strcmp("renesas_usbhs_udc", (g)->name))
+#define gadget_is_s3c2410(g)		(!strcmp("s3c2410_udc", (g)->name))
+#define gadget_is_s3c_hsotg(g)		(!strcmp("s3c-hsotg", (g)->name))
+#define gadget_is_s3c_hsudc(g)		(!strcmp("s3c-hsudc", (g)->name))
+
+/**
+ * usb_gadget_controller_number - support bcdDevice id convention
+ * @gadget: the controller being driven
+ *
+ * Return a 2-digit BCD value associated with the peripheral controller,
+ * suitable for use as part of a bcdDevice value, or a negative error code.
+ *
+ * NOTE:  this convention is purely optional, and has no meaning in terms of
+ * any USB specification.  If you want to use a different convention in your
+ * gadget driver firmware -- maybe a more formal revision ID -- feel free.
+ *
+ * Hosts see these bcdDevice numbers, and are allowed (but not encouraged!)
+ * to change their behavior accordingly.  For example it might help avoiding
+ * some chip bug.
+ */
+static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
+{
+	if (gadget_is_net2280(gadget))
+		return 0x01;
+	else if (gadget_is_dummy(gadget))
+		return 0x02;
+	else if (gadget_is_pxa(gadget))
+		return 0x03;
+	else if (gadget_is_goku(gadget))
+		return 0x06;
+	else if (gadget_is_omap(gadget))
+		return 0x08;
+	else if (gadget_is_pxa27x(gadget))
+		return 0x11;
+	else if (gadget_is_s3c2410(gadget))
+		return 0x12;
+	else if (gadget_is_at91(gadget))
+		return 0x13;
+	else if (gadget_is_imx(gadget))
+		return 0x14;
+	else if (gadget_is_musbhdrc(gadget))
+		return 0x16;
+	else if (gadget_is_atmel_usba(gadget))
+		return 0x18;
+	else if (gadget_is_fsl_usb2(gadget))
+		return 0x19;
+	else if (gadget_is_amd5536udc(gadget))
+		return 0x20;
+	else if (gadget_is_m66592(gadget))
+		return 0x21;
+	else if (gadget_is_fsl_qe(gadget))
+		return 0x22;
+	else if (gadget_is_ci13xxx_pci(gadget))
+		return 0x23;
+	else if (gadget_is_langwell(gadget))
+		return 0x24;
+	else if (gadget_is_r8a66597(gadget))
+		return 0x25;
+	else if (gadget_is_s3c_hsotg(gadget))
+		return 0x26;
+	else if (gadget_is_pch(gadget))
+		return 0x27;
+	else if (gadget_is_ci13xxx_msm(gadget))
+		return 0x28;
+	else if (gadget_is_renesas_usbhs(gadget))
+		return 0x29;
+	else if (gadget_is_s3c_hsudc(gadget))
+		return 0x30;
+	else if (gadget_is_net2272(gadget))
+		return 0x31;
+	else if (gadget_is_dwc3(gadget))
+		return 0x32;
+	else if (gadget_is_lpc32xx(gadget))
+		return 0x33;
+	else if (gadget_is_bcm63xx(gadget))
+		return 0x34;
+
+	return -ENOENT;
+}
+
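+/*
+ * Typical use (a sketch of the pattern legacy gadget drivers follow):
+ *
+ *	int gcnum = usb_gadget_controller_number(gadget);
+ *	if (gcnum >= 0)
+ *		device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
+ */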
+
+/**
+ * gadget_supports_altsettings - return true if altsettings work
+ * @gadget: the gadget in question
+ */
+static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
+{
+	/* PXA 21x/25x/26x has no altsettings at all */
+	if (gadget_is_pxa(gadget))
+		return false;
+
+	/* PXA 27x and 3xx have *broken* altsetting support */
+	if (gadget_is_pxa27x(gadget))
+		return false;
+
+	/* Everything else is *presumably* fine ... */
+	return true;
+}
+
+#endif /* __GADGET_CHIPS_H */
diff --git a/drivers/staging/ccg/ndis.h b/drivers/staging/ccg/ndis.h
new file mode 100644
index 0000000..a19f72d
--- /dev/null
+++ b/drivers/staging/ccg/ndis.h
@@ -0,0 +1,47 @@
+/*
+ * ndis.h
+ *
+ * ntddndis.h modified by Benedikt Spranger <b.spranger at pengutronix.de>
+ *
+ * Thanks to the cygwin development team,
+ * especially to Casper S. Hornstrup <chorns at users.sourceforge.net>
+ *
+ * THIS SOFTWARE IS NOT COPYRIGHTED
+ *
+ * This source code is offered for use in the public domain. You may
+ * use, modify or distribute it freely.
+ */
+
+#ifndef _LINUX_NDIS_H
+#define _LINUX_NDIS_H
+
+enum NDIS_DEVICE_POWER_STATE {
+	NdisDeviceStateUnspecified = 0,
+	NdisDeviceStateD0,
+	NdisDeviceStateD1,
+	NdisDeviceStateD2,
+	NdisDeviceStateD3,
+	NdisDeviceStateMaximum
+};
+
+struct NDIS_PM_WAKE_UP_CAPABILITIES {
+	enum NDIS_DEVICE_POWER_STATE  MinMagicPacketWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinPatternWakeUp;
+	enum NDIS_DEVICE_POWER_STATE  MinLinkChangeWakeUp;
+};
+
+struct NDIS_PNP_CAPABILITIES {
+	__le32					Flags;
+	struct NDIS_PM_WAKE_UP_CAPABILITIES	WakeUpCapabilities;
+};
+
+struct NDIS_PM_PACKET_PATTERN {
+	__le32	Priority;
+	__le32	Reserved;
+	__le32	MaskSize;
+	__le32	PatternOffset;
+	__le32	PatternSize;
+	__le32	PatternFlags;
+};
+
+#endif /* _LINUX_NDIS_H */
diff --git a/drivers/staging/ccg/rndis.c b/drivers/staging/ccg/rndis.c
new file mode 100644
index 0000000..e4192b8
--- /dev/null
+++ b/drivers/staging/ccg/rndis.c
@@ -0,0 +1,1175 @@
+/*
+ * RNDIS MSG parser
+ *
+ * Authors:	Benedikt Spranger, Pengutronix
+ *		Robert Schwebel, Pengutronix
+ *
+ *              This program is free software; you can redistribute it and/or
+ *              modify it under the terms of the GNU General Public License
+ *              version 2, as published by the Free Software Foundation.
+ *
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ *
+ * 03/12/2004 Kai-Uwe Bloem <linux-development at auerswald.de>
+ *		Fixed message length bug in init_response
+ *
+ * 03/25/2004 Kai-Uwe Bloem <linux-development at auerswald.de>
+ *		Fixed rndis_rm_hdr length bug.
+ *
+ * Copyright (C) 2004 by David Brownell
+ *		updates to merge with Linux 2.6, better match RNDIS spec
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/netdevice.h>
+
+#include <asm/io.h>
+#include <asm/byteorder.h>
+#include <asm/unaligned.h>
+
+
+#undef	VERBOSE_DEBUG
+
+#include "rndis.h"
+
+
+/* The driver for your USB chip needs to support ep0 OUT to work with
+ * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
+ *
+ * Windows hosts need an INF file like Documentation/usb/linux.inf
+ * and will be happier if you provide the host_addr module parameter.
+ */
+
+#if 0
+static int rndis_debug = 0;
+module_param (rndis_debug, int, 0);
+MODULE_PARM_DESC (rndis_debug, "enable debugging");
+#else
+#define rndis_debug		0
+#endif
+
+#define RNDIS_MAX_CONFIGS	1
+
+
+static rndis_params rndis_per_dev_params[RNDIS_MAX_CONFIGS];
+
+/* Driver Version */
+static const __le32 rndis_driver_version = cpu_to_le32(1);
+
+/* Function Prototypes */
+static rndis_resp_t *rndis_add_response(int configNr, u32 length);
+
+
+/* supported OIDs */
+static const u32 oid_supported_list[] =
+{
+	/* the general stuff */
+	RNDIS_OID_GEN_SUPPORTED_LIST,
+	RNDIS_OID_GEN_HARDWARE_STATUS,
+	RNDIS_OID_GEN_MEDIA_SUPPORTED,
+	RNDIS_OID_GEN_MEDIA_IN_USE,
+	RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
+	RNDIS_OID_GEN_LINK_SPEED,
+	RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE,
+	RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE,
+	RNDIS_OID_GEN_VENDOR_ID,
+	RNDIS_OID_GEN_VENDOR_DESCRIPTION,
+	RNDIS_OID_GEN_VENDOR_DRIVER_VERSION,
+	RNDIS_OID_GEN_CURRENT_PACKET_FILTER,
+	RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE,
+	RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
+	RNDIS_OID_GEN_PHYSICAL_MEDIUM,
+
+	/* the statistical stuff */
+	RNDIS_OID_GEN_XMIT_OK,
+	RNDIS_OID_GEN_RCV_OK,
+	RNDIS_OID_GEN_XMIT_ERROR,
+	RNDIS_OID_GEN_RCV_ERROR,
+	RNDIS_OID_GEN_RCV_NO_BUFFER,
+#ifdef	RNDIS_OPTIONAL_STATS
+	RNDIS_OID_GEN_DIRECTED_BYTES_XMIT,
+	RNDIS_OID_GEN_DIRECTED_FRAMES_XMIT,
+	RNDIS_OID_GEN_MULTICAST_BYTES_XMIT,
+	RNDIS_OID_GEN_MULTICAST_FRAMES_XMIT,
+	RNDIS_OID_GEN_BROADCAST_BYTES_XMIT,
+	RNDIS_OID_GEN_BROADCAST_FRAMES_XMIT,
+	RNDIS_OID_GEN_DIRECTED_BYTES_RCV,
+	RNDIS_OID_GEN_DIRECTED_FRAMES_RCV,
+	RNDIS_OID_GEN_MULTICAST_BYTES_RCV,
+	RNDIS_OID_GEN_MULTICAST_FRAMES_RCV,
+	RNDIS_OID_GEN_BROADCAST_BYTES_RCV,
+	RNDIS_OID_GEN_BROADCAST_FRAMES_RCV,
+	RNDIS_OID_GEN_RCV_CRC_ERROR,
+	RNDIS_OID_GEN_TRANSMIT_QUEUE_LENGTH,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+	/* mandatory 802.3 */
+	/* the general stuff */
+	RNDIS_OID_802_3_PERMANENT_ADDRESS,
+	RNDIS_OID_802_3_CURRENT_ADDRESS,
+	RNDIS_OID_802_3_MULTICAST_LIST,
+	RNDIS_OID_802_3_MAC_OPTIONS,
+	RNDIS_OID_802_3_MAXIMUM_LIST_SIZE,
+
+	/* the statistical stuff */
+	RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT,
+	RNDIS_OID_802_3_XMIT_ONE_COLLISION,
+	RNDIS_OID_802_3_XMIT_MORE_COLLISIONS,
+#ifdef	RNDIS_OPTIONAL_STATS
+	RNDIS_OID_802_3_XMIT_DEFERRED,
+	RNDIS_OID_802_3_XMIT_MAX_COLLISIONS,
+	RNDIS_OID_802_3_RCV_OVERRUN,
+	RNDIS_OID_802_3_XMIT_UNDERRUN,
+	RNDIS_OID_802_3_XMIT_HEARTBEAT_FAILURE,
+	RNDIS_OID_802_3_XMIT_TIMES_CRS_LOST,
+	RNDIS_OID_802_3_XMIT_LATE_COLLISIONS,
+#endif	/* RNDIS_OPTIONAL_STATS */
+
+#ifdef	RNDIS_PM
+	/* PM and wakeup are "mandatory" for USB, but the RNDIS specs
+	 * don't say what they mean ... and the NDIS specs are often
+	 * confusing and/or ambiguous in this context.  (That is, more
+	 * so than their specs for the other OIDs.)
+	 *
+	 * FIXME someone who knows what these should do, please
+	 * implement them!
+	 */
+
+	/* power management */
+	OID_PNP_CAPABILITIES,
+	OID_PNP_QUERY_POWER,
+	OID_PNP_SET_POWER,
+
+#ifdef	RNDIS_WAKEUP
+	/* wake up host */
+	OID_PNP_ENABLE_WAKE_UP,
+	OID_PNP_ADD_WAKE_UP_PATTERN,
+	OID_PNP_REMOVE_WAKE_UP_PATTERN,
+#endif	/* RNDIS_WAKEUP */
+#endif	/* RNDIS_PM */
+};
+
+
+/* NDIS Functions */
+static int gen_ndis_query_resp(int configNr, u32 OID, u8 *buf,
+			       unsigned buf_len, rndis_resp_t *r)
+{
+	int retval = -ENOTSUPP;
+	u32 length = 4;	/* usually */
+	__le32 *outbuf;
+	int i, count;
+	rndis_query_cmplt_type *resp;
+	struct net_device *net;
+	struct rtnl_link_stats64 temp;
+	const struct rtnl_link_stats64 *stats;
+
+	if (!r) return -ENOMEM;
+	resp = (rndis_query_cmplt_type *)r->buf;
+
+	if (!resp) return -ENOMEM;
+
+	if (buf_len && rndis_debug > 1) {
+		pr_debug("query OID %08x value, len %d:\n", OID, buf_len);
+		for (i = 0; i < buf_len; i += 16) {
+			pr_debug("%03d: %08x %08x %08x %08x\n", i,
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
+		}
+	}
+
+	/* response goes here, right after the header */
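+	/* (per the RNDIS spec the offset is measured from the RequestID
+	 *  field, so 16 points just past this 24-byte completion header)
+	 */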
+	outbuf = (__le32 *)&resp[1];
+	resp->InformationBufferOffset = cpu_to_le32(16);
+
+	net = rndis_per_dev_params[configNr].dev;
+	stats = dev_get_stats(net, &temp);
+
+	switch (OID) {
+
+	/* general oids (table 4-1) */
+
+	/* mandatory */
+	case RNDIS_OID_GEN_SUPPORTED_LIST:
+		pr_debug("%s: RNDIS_OID_GEN_SUPPORTED_LIST\n", __func__);
+		length = sizeof(oid_supported_list);
+		count  = length / sizeof(u32);
+		for (i = 0; i < count; i++)
+			outbuf[i] = cpu_to_le32(oid_supported_list[i]);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_HARDWARE_STATUS:
+		pr_debug("%s: RNDIS_OID_GEN_HARDWARE_STATUS\n", __func__);
+		/* Bogus question!
+		 * Hardware must be ready to receive high level protocols.
+		 * BTW:
+		 * reddite ergo quae sunt Caesaris Caesari
+		 * et quae sunt Dei Deo!
+		 */
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_MEDIA_SUPPORTED:
+		pr_debug("%s: RNDIS_OID_GEN_MEDIA_SUPPORTED\n", __func__);
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_MEDIA_IN_USE:
+		pr_debug("%s: RNDIS_OID_GEN_MEDIA_IN_USE\n", __func__);
+		/* one medium, one transport... (maybe you do it better) */
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr].medium);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_LINK_SPEED:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_LINK_SPEED\n", __func__);
+		if (rndis_per_dev_params[configNr].media_state
+				== RNDIS_MEDIA_STATE_DISCONNECTED)
+			*outbuf = cpu_to_le32(0);
+		else
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].speed);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_TRANSMIT_BLOCK_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_RECEIVE_BLOCK_SIZE\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			*outbuf = cpu_to_le32(
+				rndis_per_dev_params[configNr].dev->mtu);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_VENDOR_ID:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_ID\n", __func__);
+		*outbuf = cpu_to_le32(
+			rndis_per_dev_params[configNr].vendorID);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_VENDOR_DESCRIPTION:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DESCRIPTION\n", __func__);
+		if (rndis_per_dev_params[configNr].vendorDescr) {
+			length = strlen(rndis_per_dev_params[configNr].
+					vendorDescr);
+			memcpy(outbuf,
+				rndis_per_dev_params[configNr].vendorDescr,
+				length);
+		} else {
+			outbuf[0] = 0;
+		}
+		retval = 0;
+		break;
+
+	case RNDIS_OID_GEN_VENDOR_DRIVER_VERSION:
+		pr_debug("%s: RNDIS_OID_GEN_VENDOR_DRIVER_VERSION\n", __func__);
+		/* Created as LE */
+		*outbuf = rndis_driver_version;
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
+		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER\n", __func__);
+		*outbuf = cpu_to_le32(*rndis_per_dev_params[configNr].filter);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE:
+		pr_debug("%s: RNDIS_OID_GEN_MAXIMUM_TOTAL_SIZE\n", __func__);
+		*outbuf = cpu_to_le32(RNDIS_MAX_TOTAL_SIZE);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_MEDIA_CONNECT_STATUS:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_MEDIA_CONNECT_STATUS\n", __func__);
+		*outbuf = cpu_to_le32(rndis_per_dev_params[configNr]
+						.media_state);
+		retval = 0;
+		break;
+
+	case RNDIS_OID_GEN_PHYSICAL_MEDIUM:
+		pr_debug("%s: RNDIS_OID_GEN_PHYSICAL_MEDIUM\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* The RNDIS specification is incomplete/wrong.   Some versions
+	 * of MS-Windows expect OIDs that aren't specified there.  Other
+	 * versions emit undefined RNDIS messages. DOCUMENT ALL THESE!
+	 */
+	case RNDIS_OID_GEN_MAC_OPTIONS:		/* from WinME */
+		pr_debug("%s: RNDIS_OID_GEN_MAC_OPTIONS\n", __func__);
+		*outbuf = cpu_to_le32(
+			  RNDIS_MAC_OPTION_RECEIVE_SERIALIZED
+			| RNDIS_MAC_OPTION_FULL_DUPLEX);
+		retval = 0;
+		break;
+
+	/* statistics OIDs (table 4-2) */
+
+	/* mandatory */
+	case RNDIS_OID_GEN_XMIT_OK:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_XMIT_OK\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->tx_packets
+				- stats->tx_errors - stats->tx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_RCV_OK:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_RCV_OK\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_packets
+				- stats->rx_errors - stats->rx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_XMIT_ERROR:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_XMIT_ERROR\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->tx_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_RCV_ERROR:
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_OID_GEN_RCV_ERROR\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_GEN_RCV_NO_BUFFER:
+		pr_debug("%s: RNDIS_OID_GEN_RCV_NO_BUFFER\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_dropped);
+			retval = 0;
+		}
+		break;
+
+	/* ieee802.3 OIDs (table 4-3) */
+
+	/* mandatory */
+	case RNDIS_OID_802_3_PERMANENT_ADDRESS:
+		pr_debug("%s: RNDIS_OID_802_3_PERMANENT_ADDRESS\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			length = ETH_ALEN;
+			memcpy(outbuf,
+				rndis_per_dev_params[configNr].host_mac,
+				length);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_802_3_CURRENT_ADDRESS:
+		pr_debug("%s: RNDIS_OID_802_3_CURRENT_ADDRESS\n", __func__);
+		if (rndis_per_dev_params[configNr].dev) {
+			length = ETH_ALEN;
+			memcpy(outbuf,
+				rndis_per_dev_params [configNr].host_mac,
+				length);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_802_3_MULTICAST_LIST:
+		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
+		/* Multicast base address only */
+		*outbuf = cpu_to_le32(0xE0000000);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_802_3_MAXIMUM_LIST_SIZE:
+		pr_debug("%s: RNDIS_OID_802_3_MAXIMUM_LIST_SIZE\n", __func__);
+		/* Multicast base address only */
+		*outbuf = cpu_to_le32(1);
+		retval = 0;
+		break;
+
+	case RNDIS_OID_802_3_MAC_OPTIONS:
+		pr_debug("%s: RNDIS_OID_802_3_MAC_OPTIONS\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* ieee802.3 statistics OIDs (table 4-4) */
+
+	/* mandatory */
+	case RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT:
+		pr_debug("%s: RNDIS_OID_802_3_RCV_ERROR_ALIGNMENT\n", __func__);
+		if (stats) {
+			*outbuf = cpu_to_le32(stats->rx_frame_errors);
+			retval = 0;
+		}
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_802_3_XMIT_ONE_COLLISION:
+		pr_debug("%s: RNDIS_OID_802_3_XMIT_ONE_COLLISION\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	/* mandatory */
+	case RNDIS_OID_802_3_XMIT_MORE_COLLISIONS:
+		pr_debug("%s: RNDIS_OID_802_3_XMIT_MORE_COLLISIONS\n", __func__);
+		*outbuf = cpu_to_le32(0);
+		retval = 0;
+		break;
+
+	default:
+		pr_warning("%s: query unknown OID 0x%08X\n",
+			 __func__, OID);
+	}
+	if (retval < 0)
+		length = 0;
+
+	resp->InformationBufferLength = cpu_to_le32(length);
+	r->length = length + sizeof(*resp);
+	resp->MessageLength = cpu_to_le32(r->length);
+	return retval;
+}
+
+static int gen_ndis_set_resp(u8 configNr, u32 OID, u8 *buf, u32 buf_len,
+			     rndis_resp_t *r)
+{
+	rndis_set_cmplt_type *resp;
+	int i, retval = -ENOTSUPP;
+	struct rndis_params *params;
+
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_set_cmplt_type *)r->buf;
+	if (!resp)
+		return -ENOMEM;
+
+	if (buf_len && rndis_debug > 1) {
+		pr_debug("set OID %08x value, len %d:\n", OID, buf_len);
+		for (i = 0; i < buf_len; i += 16) {
+			pr_debug("%03d: %08x %08x %08x %08x\n", i,
+				get_unaligned_le32(&buf[i]),
+				get_unaligned_le32(&buf[i + 4]),
+				get_unaligned_le32(&buf[i + 8]),
+				get_unaligned_le32(&buf[i + 12]));
+		}
+	}
+
+	params = &rndis_per_dev_params[configNr];
+	switch (OID) {
+	case RNDIS_OID_GEN_CURRENT_PACKET_FILTER:
+
+		/* these NDIS_PACKET_TYPE_* bitflags are shared with
+		 * cdc_filter; it's not RNDIS-specific
+		 * NDIS_PACKET_TYPE_x == USB_CDC_PACKET_TYPE_x for x in:
+		 *	PROMISCUOUS, DIRECTED,
+		 *	MULTICAST, ALL_MULTICAST, BROADCAST
+		 */
+		*params->filter = (u16)get_unaligned_le32(buf);
+		pr_debug("%s: RNDIS_OID_GEN_CURRENT_PACKET_FILTER %08x\n",
+			__func__, *params->filter);
+
+		/* this call has a significant side effect:  it's
+		 * what makes the packet flow start and stop, like
+		 * activating the CDC Ethernet altsetting.
+		 */
+		retval = 0;
+		if (*params->filter) {
+			params->state = RNDIS_DATA_INITIALIZED;
+			netif_carrier_on(params->dev);
+			if (netif_running(params->dev))
+				netif_wake_queue(params->dev);
+		} else {
+			params->state = RNDIS_INITIALIZED;
+			netif_carrier_off(params->dev);
+			netif_stop_queue(params->dev);
+		}
+		break;
+
+	case RNDIS_OID_802_3_MULTICAST_LIST:
+		/* I think we can ignore this */
+		pr_debug("%s: RNDIS_OID_802_3_MULTICAST_LIST\n", __func__);
+		retval = 0;
+		break;
+
+	default:
+		pr_warning("%s: set unknown OID 0x%08X, size %d\n",
+			 __func__, OID, buf_len);
+	}
+
+	return retval;
+}
+
+/*
+ * Response Functions
+ */
+
+static int rndis_init_response(int configNr, rndis_init_msg_type *buf)
+{
+	rndis_init_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	if (!params->dev)
+		return -ENOTSUPP;
+
+	r = rndis_add_response(configNr, sizeof(rndis_init_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_init_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_INIT_C);
+	resp->MessageLength = cpu_to_le32(52);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+	resp->MajorVersion = cpu_to_le32(RNDIS_MAJOR_VERSION);
+	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
+	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
+	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
+	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
+	resp->MaxTransferSize = cpu_to_le32(
+		  params->dev->mtu
+		+ sizeof(struct ethhdr)
+		+ sizeof(struct rndis_packet_msg_type)
+		+ 22);
+	resp->PacketAlignmentFactor = cpu_to_le32(0);
+	resp->AFListOffset = cpu_to_le32(0);
+	resp->AFListSize = cpu_to_le32(0);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_query_response(int configNr, rndis_query_msg_type *buf)
+{
+	rndis_query_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	/* pr_debug("%s: OID = %08X\n", __func__, cpu_to_le32(buf->OID)); */
+	if (!params->dev)
+		return -ENOTSUPP;
+
+	/*
+	 * we need more memory:
+	 * gen_ndis_query_resp expects enough space for
+	 * rndis_query_cmplt_type followed by data.
+	 * oid_supported_list is the largest data reply
+	 */
+	r = rndis_add_response(configNr,
+		sizeof(oid_supported_list) + sizeof(rndis_query_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_query_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_QUERY_C);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+
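+	/* InformationBufferOffset is relative to the RequestID field,
+	 * which starts 8 bytes into the message -- hence the "+ 8" below
+	 */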
+	if (gen_ndis_query_resp(configNr, le32_to_cpu(buf->OID),
+			le32_to_cpu(buf->InformationBufferOffset)
+					+ 8 + (u8 *)buf,
+			le32_to_cpu(buf->InformationBufferLength),
+			r)) {
+		/* OID not supported */
+		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
+		resp->MessageLength = cpu_to_le32(sizeof *resp);
+		resp->InformationBufferLength = cpu_to_le32(0);
+		resp->InformationBufferOffset = cpu_to_le32(0);
+	} else
+		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_set_response(int configNr, rndis_set_msg_type *buf)
+{
+	u32 BufLength, BufOffset;
+	rndis_set_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	r = rndis_add_response(configNr, sizeof(rndis_set_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_set_cmplt_type *)r->buf;
+
+	BufLength = le32_to_cpu(buf->InformationBufferLength);
+	BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+
+#ifdef	VERBOSE_DEBUG
+	{
+		/* note: 'i' is declared here so the dump compiles when
+		 * VERBOSE_DEBUG is enabled
+		 */
+		u32 i;
+
+		pr_debug("%s: Length: %d\n", __func__, BufLength);
+		pr_debug("%s: Offset: %d\n", __func__, BufOffset);
+		pr_debug("%s: InfoBuffer: ", __func__);
+
+		for (i = 0; i < BufLength; i++)
+			pr_debug("%02x ", *(((u8 *) buf) + i + 8 + BufOffset));
+
+		pr_debug("\n");
+	}
+#endif
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_SET_C);
+	resp->MessageLength = cpu_to_le32(16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
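+	/* as with queries, BufOffset counts from the RequestID field,
+	 * 8 bytes into the message
+	 */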
+	if (gen_ndis_set_resp(configNr, le32_to_cpu(buf->OID),
+			((u8 *)buf) + 8 + BufOffset, BufLength, r))
+		resp->Status = cpu_to_le32(RNDIS_STATUS_NOT_SUPPORTED);
+	else
+		resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_reset_response(int configNr, rndis_reset_msg_type *buf)
+{
+	rndis_reset_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	r = rndis_add_response(configNr, sizeof(rndis_reset_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_reset_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_RESET_C);
+	resp->MessageLength = cpu_to_le32(16);
+	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+	/* reset information */
+	resp->AddressingReset = cpu_to_le32(1);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+static int rndis_keepalive_response(int configNr,
+				    rndis_keepalive_msg_type *buf)
+{
+	rndis_keepalive_cmplt_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	/* host "should" check only in RNDIS_DATA_INITIALIZED state */
+
+	r = rndis_add_response(configNr, sizeof(rndis_keepalive_cmplt_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_keepalive_cmplt_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_KEEPALIVE_C);
+	resp->MessageLength = cpu_to_le32(16);
+	resp->RequestID = buf->RequestID; /* Still LE in msg buffer */
+	resp->Status = cpu_to_le32(RNDIS_STATUS_SUCCESS);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+
+/*
+ * Device to Host Communication
+ */
+static int rndis_indicate_status_msg(int configNr, u32 status)
+{
+	rndis_indicate_status_msg_type *resp;
+	rndis_resp_t *r;
+	struct rndis_params *params = rndis_per_dev_params + configNr;
+
+	if (params->state == RNDIS_UNINITIALIZED)
+		return -ENOTSUPP;
+
+	r = rndis_add_response(configNr,
+				sizeof(rndis_indicate_status_msg_type));
+	if (!r)
+		return -ENOMEM;
+	resp = (rndis_indicate_status_msg_type *)r->buf;
+
+	resp->MessageType = cpu_to_le32(RNDIS_MSG_INDICATE);
+	resp->MessageLength = cpu_to_le32(20);
+	resp->Status = cpu_to_le32(status);
+	resp->StatusBufferLength = cpu_to_le32(0);
+	resp->StatusBufferOffset = cpu_to_le32(0);
+
+	params->resp_avail(params->v);
+	return 0;
+}
+
+int rndis_signal_connect(int configNr)
+{
+	rndis_per_dev_params[configNr].media_state
+			= RNDIS_MEDIA_STATE_CONNECTED;
+	return rndis_indicate_status_msg(configNr,
+					  RNDIS_STATUS_MEDIA_CONNECT);
+}
+
+int rndis_signal_disconnect(int configNr)
+{
+	rndis_per_dev_params[configNr].media_state
+			= RNDIS_MEDIA_STATE_DISCONNECTED;
+	return rndis_indicate_status_msg(configNr,
+					  RNDIS_STATUS_MEDIA_DISCONNECT);
+}
+
+void rndis_uninit(int configNr)
+{
+	u8 *buf;
+	u32 length;
+
+	if (configNr >= RNDIS_MAX_CONFIGS)
+		return;
+	rndis_per_dev_params[configNr].state = RNDIS_UNINITIALIZED;
+
+	/* drain the response queue */
+	while ((buf = rndis_get_next_response(configNr, &length)))
+		rndis_free_response(configNr, buf);
+}
+
+void rndis_set_host_mac(int configNr, const u8 *addr)
+{
+	rndis_per_dev_params[configNr].host_mac = addr;
+}
+
+/*
+ * Message Parser
+ */
+int rndis_msg_parser(u8 configNr, u8 *buf)
+{
+	u32 MsgType, MsgLength;
+	__le32 *tmp;
+	struct rndis_params *params;
+
+	if (!buf)
+		return -ENOMEM;
+
+	tmp = (__le32 *)buf;
+	MsgType   = get_unaligned_le32(tmp++);
+	MsgLength = get_unaligned_le32(tmp++);
+
+	if (configNr >= RNDIS_MAX_CONFIGS)
+		return -ENOTSUPP;
+	params = &rndis_per_dev_params[configNr];
+
+	/* NOTE: RNDIS is *EXTREMELY* chatty ... Windows constantly polls for
+	 * rx/tx statistics and link status, in addition to KEEPALIVE traffic
+	 * and normal HC level polling to see if there's any IN traffic.
+	 */
+
+	/* For USB: responses may take up to 10 seconds */
+	switch (MsgType) {
+	case RNDIS_MSG_INIT:
+		pr_debug("%s: RNDIS_MSG_INIT\n",
+			__func__);
+		params->state = RNDIS_INITIALIZED;
+		return rndis_init_response(configNr,
+					(rndis_init_msg_type *)buf);
+
+	case RNDIS_MSG_HALT:
+		pr_debug("%s: RNDIS_MSG_HALT\n",
+			__func__);
+		params->state = RNDIS_UNINITIALIZED;
+		if (params->dev) {
+			netif_carrier_off(params->dev);
+			netif_stop_queue(params->dev);
+		}
+		return 0;
+
+	case RNDIS_MSG_QUERY:
+		return rndis_query_response(configNr,
+					(rndis_query_msg_type *)buf);
+
+	case RNDIS_MSG_SET:
+		return rndis_set_response(configNr,
+					(rndis_set_msg_type *)buf);
+
+	case RNDIS_MSG_RESET:
+		pr_debug("%s: RNDIS_MSG_RESET\n",
+			__func__);
+		return rndis_reset_response(configNr,
+					(rndis_reset_msg_type *)buf);
+
+	case RNDIS_MSG_KEEPALIVE:
+		/* For USB: host does this every 5 seconds */
+		if (rndis_debug > 1)
+			pr_debug("%s: RNDIS_MSG_KEEPALIVE\n",
+				__func__);
+		return rndis_keepalive_response(configNr,
+						 (rndis_keepalive_msg_type *)
+						 buf);
+
+	default:
+		/* At least Windows XP emits some undefined RNDIS messages.
+		 * In one case those messages seemed to relate to the host
+		 * suspending itself.
+		 */
+		pr_warning("%s: unknown RNDIS message 0x%08X len %d\n",
+			__func__, MsgType, MsgLength);
+		print_hex_dump_bytes(__func__, DUMP_PREFIX_OFFSET,
+				     buf, MsgLength);
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+int rndis_register(void (*resp_avail)(void *v), void *v)
+{
+	u8 i;
+
+	if (!resp_avail)
+		return -EINVAL;
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		if (!rndis_per_dev_params[i].used) {
+			rndis_per_dev_params[i].used = 1;
+			rndis_per_dev_params[i].resp_avail = resp_avail;
+			rndis_per_dev_params[i].v = v;
+			pr_debug("%s: configNr = %d\n", __func__, i);
+			return i;
+		}
+	}
+	pr_debug("failed\n");
+
+	return -ENODEV;
+}
+
+void rndis_deregister(int configNr)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (configNr >= RNDIS_MAX_CONFIGS) return;
+	rndis_per_dev_params[configNr].used = 0;
+}
+
+int rndis_set_param_dev(u8 configNr, struct net_device *dev, u16 *cdc_filter)
+{
+	pr_debug("%s:\n", __func__);
+	if (!dev)
+		return -EINVAL;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].dev = dev;
+	rndis_per_dev_params[configNr].filter = cdc_filter;
+
+	return 0;
+}
+
+int rndis_set_param_vendor(u8 configNr, u32 vendorID, const char *vendorDescr)
+{
+	pr_debug("%s:\n", __func__);
+	if (!vendorDescr) return -1;
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].vendorID = vendorID;
+	rndis_per_dev_params[configNr].vendorDescr = vendorDescr;
+
+	return 0;
+}
+
+int rndis_set_param_medium(u8 configNr, u32 medium, u32 speed)
+{
+	pr_debug("%s: %u %u\n", __func__, medium, speed);
+	if (configNr >= RNDIS_MAX_CONFIGS) return -1;
+
+	rndis_per_dev_params[configNr].medium = medium;
+	rndis_per_dev_params[configNr].speed = speed;
+
+	return 0;
+}
+
+void rndis_add_hdr(struct sk_buff *skb)
+{
+	struct rndis_packet_msg_type *header;
+
+	if (!skb)
+		return;
+	header = (void *)skb_push(skb, sizeof(*header));
+	memset(header, 0, sizeof *header);
+	header->MessageType = cpu_to_le32(RNDIS_MSG_PACKET);
+	header->MessageLength = cpu_to_le32(skb->len);
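+	/* DataOffset is counted from the DataOffset field itself, so
+	 * 36 == sizeof(*header) - 8 points just past this header
+	 */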
+	header->DataOffset = cpu_to_le32(36);
+	header->DataLength = cpu_to_le32(skb->len - sizeof(*header));
+}
+
+void rndis_free_response(int configNr, u8 *buf)
+{
+	rndis_resp_t *r;
+	struct list_head *act, *tmp;
+
+	list_for_each_safe(act, tmp,
+			&(rndis_per_dev_params[configNr].resp_queue))
+	{
+		r = list_entry(act, rndis_resp_t, list);
+		if (r && r->buf == buf) {
+			list_del(&r->list);
+			kfree(r);
+		}
+	}
+}
+
+u8 *rndis_get_next_response(int configNr, u32 *length)
+{
+	rndis_resp_t *r;
+	struct list_head *act, *tmp;
+
+	if (!length) return NULL;
+
+	list_for_each_safe(act, tmp,
+			&(rndis_per_dev_params[configNr].resp_queue))
+	{
+		r = list_entry(act, rndis_resp_t, list);
+		if (!r->send) {
+			r->send = 1;
+			*length = r->length;
+			return r->buf;
+		}
+	}
+
+	return NULL;
+}
+
+static rndis_resp_t *rndis_add_response(int configNr, u32 length)
+{
+	rndis_resp_t *r;
+
+	/* NOTE: this gets copied into ether.c USB_BUFSIZ bytes ... */
+	r = kmalloc(sizeof(rndis_resp_t) + length, GFP_ATOMIC);
+	if (!r) return NULL;
+
+	r->buf = (u8 *)(r + 1);
+	r->length = length;
+	r->send = 0;
+
+	list_add_tail(&r->list,
+		&(rndis_per_dev_params[configNr].resp_queue));
+	return r;
+}
+
+int rndis_rm_hdr(struct gether *port,
+			struct sk_buff *skb,
+			struct sk_buff_head *list)
+{
+	/* tmp points to a struct rndis_packet_msg_type */
+	__le32 *tmp = (void *)skb->data;
+
+	/* MessageType, MessageLength */
+	if (cpu_to_le32(RNDIS_MSG_PACKET)
+			!= get_unaligned(tmp++)) {
+		dev_kfree_skb_any(skb);
+		return -EINVAL;
+	}
+	tmp++;
+
+	/* DataOffset, DataLength */
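+	/* DataOffset is measured from the DataOffset field (byte 8 of
+	 * the message), so add 8 to strip the whole header
+	 */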
+	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
+		dev_kfree_skb_any(skb);
+		return -EOVERFLOW;
+	}
+	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	skb_queue_tail(list, skb);
+	return 0;
+}
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static int rndis_proc_show(struct seq_file *m, void *v)
+{
+	rndis_params *param = m->private;
+
+	seq_printf(m,
+			 "Config Nr. %d\n"
+			 "used      : %s\n"
+			 "state     : %s\n"
+			 "medium    : 0x%08X\n"
+			 "speed     : %d\n"
+			 "cable     : %s\n"
+			 "vendor ID : 0x%08X\n"
+			 "vendor    : %s\n",
+			 param->confignr, (param->used) ? "y" : "n",
+			 ({ char *s = "?";
+			 switch (param->state) {
+			 case RNDIS_UNINITIALIZED:
+				s = "RNDIS_UNINITIALIZED"; break;
+			 case RNDIS_INITIALIZED:
+				s = "RNDIS_INITIALIZED"; break;
+			 case RNDIS_DATA_INITIALIZED:
+				s = "RNDIS_DATA_INITIALIZED"; break;
+			}; s; }),
+			 param->medium,
+			 (param->media_state) ? 0 : param->speed*100,
+			 (param->media_state) ? "disconnected" : "connected",
+			 param->vendorID, param->vendorDescr);
+	return 0;
+}
+
+static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	rndis_params *p = PDE(file->f_path.dentry->d_inode)->data;
+	u32 speed = 0;
+	int i, fl_speed = 0;
+
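+	/* accepted input: a decimal number (terminated by any other
+	 * character, e.g. a newline) sets the reported link speed;
+	 * 'C'/'c' signals media connect, 'D'/'d' signals disconnect
+	 */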
+	for (i = 0; i < count; i++) {
+		char c;
+		if (get_user(c, buffer))
+			return -EFAULT;
+		switch (c) {
+		case '0':
+		case '1':
+		case '2':
+		case '3':
+		case '4':
+		case '5':
+		case '6':
+		case '7':
+		case '8':
+		case '9':
+			fl_speed = 1;
+			speed = speed * 10 + c - '0';
+			break;
+		case 'C':
+		case 'c':
+			rndis_signal_connect(p->confignr);
+			break;
+		case 'D':
+		case 'd':
+			rndis_signal_disconnect(p->confignr);
+			break;
+		default:
+			if (fl_speed) p->speed = speed;
+			else pr_debug("%c is not valid\n", c);
+			break;
+		}
+
+		buffer++;
+	}
+
+	return count;
+}
+
+static int rndis_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rndis_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations rndis_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= rndis_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= rndis_proc_write,
+};
+
+#define	NAME_TEMPLATE "driver/rndis-%03d"
+
+static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+
+int rndis_init(void)
+{
+	u8 i;
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+#ifdef	CONFIG_USB_GADGET_DEBUG_FILES
+		char name [20];
+
+		sprintf(name, NAME_TEMPLATE, i);
+		rndis_connect_state[i] = proc_create_data(name, 0660, NULL,
+					&rndis_proc_fops,
+					(void *)(rndis_per_dev_params + i));
+		if (!rndis_connect_state[i]) {
+			pr_debug("%s: remove entries", __func__);
+			while (i) {
+				sprintf(name, NAME_TEMPLATE, --i);
+				remove_proc_entry(name, NULL);
+			}
+			pr_debug("\n");
+			return -EIO;
+		}
+#endif
+		rndis_per_dev_params[i].confignr = i;
+		rndis_per_dev_params[i].used = 0;
+		rndis_per_dev_params[i].state = RNDIS_UNINITIALIZED;
+		rndis_per_dev_params[i].media_state
+				= RNDIS_MEDIA_STATE_DISCONNECTED;
+		INIT_LIST_HEAD(&(rndis_per_dev_params[i].resp_queue));
+	}
+
+	return 0;
+}
+
+void rndis_exit(void)
+{
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+	u8 i;
+	char name[20];
+
+	for (i = 0; i < RNDIS_MAX_CONFIGS; i++) {
+		sprintf(name, NAME_TEMPLATE, i);
+		remove_proc_entry(name, NULL);
+	}
+#endif
+}
diff --git a/drivers/staging/ccg/rndis.h b/drivers/staging/ccg/rndis.h
new file mode 100644
index 0000000..0647f2f
--- /dev/null
+++ b/drivers/staging/ccg/rndis.h
@@ -0,0 +1,222 @@
+/*
+ * RNDIS	Definitions for Remote NDIS
+ *
+ * Authors:	Benedikt Spranger, Pengutronix
+ *		Robert Schwebel, Pengutronix
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		version 2, as published by the Free Software Foundation.
+ *
+ *		This software was originally developed in conformance with
+ *		Microsoft's Remote NDIS Specification License Agreement.
+ */
+
+#ifndef _LINUX_RNDIS_H
+#define _LINUX_RNDIS_H
+
+#include <linux/rndis.h>
+#include "ndis.h"
+
+#define RNDIS_MAXIMUM_FRAME_SIZE	1518
+#define RNDIS_MAX_TOTAL_SIZE		1558
+
+typedef struct rndis_init_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	MaxTransferSize;
+} rndis_init_msg_type;
+
+typedef struct rndis_init_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	MajorVersion;
+	__le32	MinorVersion;
+	__le32	DeviceFlags;
+	__le32	Medium;
+	__le32	MaxPacketsPerTransfer;
+	__le32	MaxTransferSize;
+	__le32	PacketAlignmentFactor;
+	__le32	AFListOffset;
+	__le32	AFListSize;
+} rndis_init_cmplt_type;
+
+typedef struct rndis_halt_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_halt_msg_type;
+
+typedef struct rndis_query_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_query_msg_type;
+
+typedef struct rndis_query_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+} rndis_query_cmplt_type;
+
+typedef struct rndis_set_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	OID;
+	__le32	InformationBufferLength;
+	__le32	InformationBufferOffset;
+	__le32	DeviceVcHandle;
+} rndis_set_msg_type;
+
+typedef struct rndis_set_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_set_cmplt_type;
+
+typedef struct rndis_reset_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Reserved;
+} rndis_reset_msg_type;
+
+typedef struct rndis_reset_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	AddressingReset;
+} rndis_reset_cmplt_type;
+
+typedef struct rndis_indicate_status_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	Status;
+	__le32	StatusBufferLength;
+	__le32	StatusBufferOffset;
+} rndis_indicate_status_msg_type;
+
+typedef struct rndis_keepalive_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+} rndis_keepalive_msg_type;
+
+typedef struct rndis_keepalive_cmplt_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	RequestID;
+	__le32	Status;
+} rndis_keepalive_cmplt_type;
+
+struct rndis_packet_msg_type
+{
+	__le32	MessageType;
+	__le32	MessageLength;
+	__le32	DataOffset;
+	__le32	DataLength;
+	__le32	OOBDataOffset;
+	__le32	OOBDataLength;
+	__le32	NumOOBDataElements;
+	__le32	PerPacketInfoOffset;
+	__le32	PerPacketInfoLength;
+	__le32	VcHandle;
+	__le32	Reserved;
+} __attribute__ ((packed));
+
+struct rndis_config_parameter
+{
+	__le32	ParameterNameOffset;
+	__le32	ParameterNameLength;
+	__le32	ParameterType;
+	__le32	ParameterValueOffset;
+	__le32	ParameterValueLength;
+};
+
+/* implementation specific */
+enum rndis_state
+{
+	RNDIS_UNINITIALIZED,
+	RNDIS_INITIALIZED,
+	RNDIS_DATA_INITIALIZED,
+};
+
+typedef struct rndis_resp_t
+{
+	struct list_head	list;
+	u8			*buf;
+	u32			length;
+	int			send;
+} rndis_resp_t;
+
+typedef struct rndis_params
+{
+	u8			confignr;
+	u8			used;
+	u16			saved_filter;
+	enum rndis_state	state;
+	u32			medium;
+	u32			speed;
+	u32			media_state;
+
+	const u8		*host_mac;
+	u16			*filter;
+	struct net_device	*dev;
+
+	u32			vendorID;
+	const char		*vendorDescr;
+	void			(*resp_avail)(void *v);
+	void			*v;
+	struct list_head	resp_queue;
+} rndis_params;
+
+/* RNDIS Message parser and other useless functions */
+int  rndis_msg_parser (u8 configNr, u8 *buf);
+int  rndis_register(void (*resp_avail)(void *v), void *v);
+void rndis_deregister (int configNr);
+int  rndis_set_param_dev (u8 configNr, struct net_device *dev,
+			 u16 *cdc_filter);
+int  rndis_set_param_vendor (u8 configNr, u32 vendorID,
+			    const char *vendorDescr);
+int  rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
+void rndis_add_hdr (struct sk_buff *skb);
+int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
+			struct sk_buff_head *list);
+u8   *rndis_get_next_response (int configNr, u32 *length);
+void rndis_free_response (int configNr, u8 *buf);
+
+void rndis_uninit (int configNr);
+int  rndis_signal_connect (int configNr);
+int  rndis_signal_disconnect (int configNr);
+int  rndis_state (int configNr);
+extern void rndis_set_host_mac (int configNr, const u8 *addr);
+
+int rndis_init(void);
+void rndis_exit (void);
+
+#endif  /* _LINUX_RNDIS_H */
diff --git a/drivers/staging/ccg/storage_common.c b/drivers/staging/ccg/storage_common.c
new file mode 100644
index 0000000..8d9bcd8
--- /dev/null
+++ b/drivers/staging/ccg/storage_common.c
@@ -0,0 +1,893 @@
+/*
+ * storage_common.c -- Common definitions for mass storage functionality
+ *
+ * Copyright (C) 2003-2008 Alan Stern
+ * Copyright (C) 2009 Samsung Electronics
+ * Author: Michal Nazarewicz (mina86 at mina86.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+
+/*
+ * This file requires the following identifiers used in USB strings to
+ * be defined (each of type pointer to char):
+ *  - fsg_string_manufacturer -- name of the manufacturer
+ *  - fsg_string_product      -- name of the product
+ *  - fsg_string_serial       -- product's serial
+ *  - fsg_string_config       -- name of the configuration
+ *  - fsg_string_interface    -- name of the interface
+ * The first four are only needed when FSG_DESCRIPTORS_DEVICE_STRINGS
+ * macro is defined prior to including this file.
+ */
+
+/*
+ * When FSG_NO_INTR_EP is defined, the fsg_fs_intr_in_desc and
+ * fsg_hs_intr_in_desc objects as well as the
+ * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
+ * macros are not defined.
+ *
+ * When FSG_NO_DEVICE_STRINGS is defined, FSG_STRING_MANUFACTURER,
+ * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
+ * defined (and the corresponding string-table entries are missing),
+ * and FSG_STRING_INTERFACE has the value zero.
+ *
+ * When FSG_NO_OTG is defined, fsg_otg_desc won't be defined.
+ */
+
+/*
+ * When USB_GADGET_DEBUG_FILES is defined the module param num_buffers
+ * sets the number of pipeline buffers (length of the fsg_buffhd array).
+ * The valid range of num_buffers is: num >= 2 && num <= 4.
+ */
+
+
+#include <linux/usb/storage.h>
+#include <scsi/scsi.h>
+#include <asm/unaligned.h>
+
+
+/*
+ * Thanks to NetChip Technologies for donating this product ID.
+ *
+ * DO NOT REUSE THESE IDs with any other driver!!  Ever!!
+ * Instead:  allocate your own, using normal USB-IF procedures.
+ */
+#define FSG_VENDOR_ID	0x0525	/* NetChip */
+#define FSG_PRODUCT_ID	0xa4a5	/* Linux-USB File-backed Storage Gadget */
+
+
+/*-------------------------------------------------------------------------*/
+
+
+#ifndef DEBUG
+#undef VERBOSE_DEBUG
+#undef DUMP_MSGS
+#endif /* !DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VLDBG	LDBG
+#else
+#define VLDBG(lun, fmt, args...) do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define LDBG(lun, fmt, args...)   dev_dbg (&(lun)->dev, fmt, ## args)
+#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
+#define LWARN(lun, fmt, args...)  dev_warn(&(lun)->dev, fmt, ## args)
+#define LINFO(lun, fmt, args...)  dev_info(&(lun)->dev, fmt, ## args)
+
+/*
+ * Keep those macros in sync with those in
+ * include/linux/usb/composite.h or else GCC will complain.  If they
+ * are identical (the same argument names, whitespace in the same
+ * places) GCC will allow the redefinition; otherwise (even if only
+ * some whitespace is removed or added) a warning will be issued.
+ *
+ * Those macros are needed here because File Storage Gadget does not
+ * include the composite.h header.  For composite gadgets those macros
+ * are redundant since composite.h is included anyway.
+ *
+ * One could check whether those macros are already defined (which
+ * would indicate composite.h had been included) or not (which would
+ * indicate we were in FSG) but this is not done because a warning is
+ * desired if definitions here differ from the ones in composite.h.
+ *
+ * We want the definitions to match and be the same in File Storage
+ * Gadget as well as Mass Storage Function (and so composite gadgets
+ * using MSF).  If someone changes them in composite.h it will produce
+ * a warning in this file when building MSF.
+ */
+#define DBG(d, fmt, args...)     dev_dbg(&(d)->gadget->dev , fmt , ## args)
+#define VDBG(d, fmt, args...)    dev_vdbg(&(d)->gadget->dev , fmt , ## args)
+#define ERROR(d, fmt, args...)   dev_err(&(d)->gadget->dev , fmt , ## args)
+#define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args)
+#define INFO(d, fmt, args...)    dev_info(&(d)->gadget->dev , fmt , ## args)
+
+
+
+#ifdef DUMP_MSGS
+
+#  define dump_msg(fsg, /* const char * */ label,			\
+		   /* const u8 * */ buf, /* unsigned */ length) do {	\
+	if (length < 512) {						\
+		DBG(fsg, "%s, length %u:\n", label, length);		\
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,	\
+			       16, 1, buf, length, 0);			\
+	}								\
+} while (0)
+
+#  define dump_cdb(fsg) do { } while (0)
+
+#else
+
+#  define dump_msg(fsg, /* const char * */ label, \
+		   /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
+
+#  ifdef VERBOSE_DEBUG
+
+#    define dump_cdb(fsg)						\
+	print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,	\
+		       16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0)		\
+
+#  else
+
+#    define dump_cdb(fsg) do { } while (0)
+
+#  endif /* VERBOSE_DEBUG */
+
+#endif /* DUMP_MSGS */
+
+/*-------------------------------------------------------------------------*/
+
+/* CBI Interrupt data structure */
+struct interrupt_data {
+	u8	bType;
+	u8	bValue;
+};
+
+#define CBI_INTERRUPT_DATA_LEN		2
+
+/* CBI Accept Device-Specific Command request */
+#define USB_CBI_ADSC_REQUEST		0x00
+
+
+/* Length of a SCSI Command Data Block */
+#define MAX_COMMAND_SIZE	16
+
+/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
+#define SS_NO_SENSE				0
+#define SS_COMMUNICATION_FAILURE		0x040800
+#define SS_INVALID_COMMAND			0x052000
+#define SS_INVALID_FIELD_IN_CDB			0x052400
+#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE	0x052100
+#define SS_LOGICAL_UNIT_NOT_SUPPORTED		0x052500
+#define SS_MEDIUM_NOT_PRESENT			0x023a00
+#define SS_MEDIUM_REMOVAL_PREVENTED		0x055302
+#define SS_NOT_READY_TO_READY_TRANSITION	0x062800
+#define SS_RESET_OCCURRED			0x062900
+#define SS_SAVING_PARAMETERS_NOT_SUPPORTED	0x053900
+#define SS_UNRECOVERED_READ_ERROR		0x031100
+#define SS_WRITE_ERROR				0x030c02
+#define SS_WRITE_PROTECTED			0x072700
+
+#define SK(x)		((u8) ((x) >> 16))	/* Sense Key byte, etc. */
+#define ASC(x)		((u8) ((x) >> 8))
+#define ASCQ(x)		((u8) (x))
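+/* e.g. SS_MEDIUM_NOT_PRESENT (0x023a00) unpacks to SK 0x02, ASC 0x3a, ASCQ 0x00 */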
+
+
+/*-------------------------------------------------------------------------*/
+
+
+struct fsg_lun {
+	struct file	*filp;
+	loff_t		file_length;
+	loff_t		num_sectors;
+
+	unsigned int	initially_ro:1;
+	unsigned int	ro:1;
+	unsigned int	removable:1;
+	unsigned int	cdrom:1;
+	unsigned int	prevent_medium_removal:1;
+	unsigned int	registered:1;
+	unsigned int	info_valid:1;
+	unsigned int	nofua:1;
+
+	u32		sense_data;
+	u32		sense_data_info;
+	u32		unit_attention_data;
+
+	unsigned int	blkbits;	/* Bits of logical block size of bound block device */
+	unsigned int	blksize;	/* logical block size of bound block device */
+	struct device	dev;
+};
+
+#define fsg_lun_is_open(curlun)	((curlun)->filp != NULL)
+
+static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
+{
+	return container_of(dev, struct fsg_lun, dev);
+}
+
+
+/* Big enough to hold our biggest descriptor */
+#define EP0_BUFSIZE	256
+#define DELAYED_STATUS	(EP0_BUFSIZE + 999)	/* An impossibly large value */
+
+#ifdef CONFIG_USB_GADGET_DEBUG_FILES
+
+static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
+module_param_named(num_buffers, fsg_num_buffers, uint, S_IRUGO);
+MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers");
+
+#else
+
+/*
+ * Number of buffers we will use.
+ * 2 is usually enough for a good buffering pipeline
+ */
+#define fsg_num_buffers	CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
+
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
+
+/* check if fsg_num_buffers is within a valid range */
+static inline int fsg_num_buffers_validate(void)
+{
+	if (fsg_num_buffers >= 2 && fsg_num_buffers <= 4)
+		return 0;
+	pr_err("fsg_num_buffers %u is out of range (%d to %d)\n",
+	       fsg_num_buffers, 2, 4);
+	return -EINVAL;
+}
+
+/* Default size of buffer length. */
+#define FSG_BUFLEN	((u32)16384)
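+/* with num_buffers limited to 2..4 this pipeline uses 32-64 KiB of buffers */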
+
+/* Maximal number of LUNs supported in mass storage function */
+#define FSG_MAX_LUNS	8
+
+enum fsg_buffer_state {
+	BUF_STATE_EMPTY = 0,
+	BUF_STATE_FULL,
+	BUF_STATE_BUSY
+};
+
+struct fsg_buffhd {
+	void				*buf;
+	enum fsg_buffer_state		state;
+	struct fsg_buffhd		*next;
+
+	/*
+	 * The NetChip 2280 is faster, and handles some protocol faults
+	 * better, if we don't submit any short bulk-out read requests.
+	 * So we will record the intended request length here.
+	 */
+	unsigned int			bulk_out_intended_length;
+
+	struct usb_request		*inreq;
+	int				inreq_busy;
+	struct usb_request		*outreq;
+	int				outreq_busy;
+};
+
+enum fsg_state {
+	/* This one isn't used anywhere */
+	FSG_STATE_COMMAND_PHASE = -10,
+	FSG_STATE_DATA_PHASE,
+	FSG_STATE_STATUS_PHASE,
+
+	FSG_STATE_IDLE = 0,
+	FSG_STATE_ABORT_BULK_OUT,
+	FSG_STATE_RESET,
+	FSG_STATE_INTERFACE_CHANGE,
+	FSG_STATE_CONFIG_CHANGE,
+	FSG_STATE_DISCONNECT,
+	FSG_STATE_EXIT,
+	FSG_STATE_TERMINATED
+};
+
+enum data_direction {
+	DATA_DIR_UNKNOWN = 0,
+	DATA_DIR_FROM_HOST,
+	DATA_DIR_TO_HOST,
+	DATA_DIR_NONE
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+
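+/*
+ * Note: this reads the 32-bit big-endian word starting one byte before
+ * @buf and masks off the top byte; the byte at buf - 1 must therefore be
+ * readable, which holds for the SCSI CDB offsets this helper is used on.
+ */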
+static inline u32 get_unaligned_be24(u8 *buf)
+{
+	return 0xffffff & (u32) get_unaligned_be32(buf - 1);
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+
+enum {
+#ifndef FSG_NO_DEVICE_STRINGS
+	FSG_STRING_MANUFACTURER	= 1,
+	FSG_STRING_PRODUCT,
+	FSG_STRING_SERIAL,
+	FSG_STRING_CONFIG,
+#endif
+	FSG_STRING_INTERFACE
+};
+
+
+#ifndef FSG_NO_OTG
+static struct usb_otg_descriptor
+fsg_otg_desc = {
+	.bLength =		sizeof fsg_otg_desc,
+	.bDescriptorType =	USB_DT_OTG,
+
+	.bmAttributes =		USB_OTG_SRP,
+};
+#endif
+
+/* There is only one interface. */
+
+static struct usb_interface_descriptor
+fsg_intf_desc = {
+	.bLength =		sizeof fsg_intf_desc,
+	.bDescriptorType =	USB_DT_INTERFACE,
+
+	.bNumEndpoints =	2,		/* Adjusted during fsg_bind() */
+	.bInterfaceClass =	USB_CLASS_MASS_STORAGE,
+	.bInterfaceSubClass =	USB_SC_SCSI,	/* Adjusted during fsg_bind() */
+	.bInterfaceProtocol =	USB_PR_BULK,	/* Adjusted during fsg_bind() */
+	.iInterface =		FSG_STRING_INTERFACE,
+};
+
+/*
+ * Three full-speed endpoint descriptors: bulk-in, bulk-out, and
+ * interrupt-in.
+ */
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+static struct usb_endpoint_descriptor
+fsg_fs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	/* wMaxPacketSize set by autoconfiguration */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_fs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		32,	/* frames -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_FS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_fs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_fs_intr_in_desc,
+#endif
+	NULL,
+};
+
+
+/*
+ * USB 2.0 devices need to expose both high speed and full speed
+ * descriptors, unless they only run at full speed.
+ *
+ * That means alternate endpoint descriptors (bigger packets)
+ * and a "device qualifier" ... plus more construction options
+ * for the configuration descriptor.
+ */
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor
+fsg_hs_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(512),
+	.bInterval =		1,	/* NAK every 1 uframe */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_hs_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		9,	/* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_HS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static struct usb_descriptor_header *fsg_hs_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_hs_intr_in_desc,
+#endif
+	NULL,
+};
+
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_in_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/*.bMaxBurst =		DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor
+fsg_ss_bulk_out_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_bulk_out_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/*.bMaxBurst =		DYNAMIC, */
+};
+
+#ifndef FSG_NO_INTR_EP
+
+static struct usb_endpoint_descriptor
+fsg_ss_intr_in_desc = {
+	.bLength =		USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+
+	/* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
+	.bmAttributes =		USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize =	cpu_to_le16(2),
+	.bInterval =		9,	/* 2**(9-1) = 256 uframes -> 32 ms */
+};
+
+static struct usb_ss_ep_comp_descriptor fsg_ss_intr_in_comp_desc = {
+	.bLength =		sizeof(fsg_ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	.wBytesPerInterval =	cpu_to_le16(2),
+};
+
+#ifndef FSG_NO_OTG
+#  define FSG_SS_FUNCTION_PRE_EP_ENTRIES	2
+#else
+#  define FSG_SS_FUNCTION_PRE_EP_ENTRIES	1
+#endif
+
+#endif
+
+static __maybe_unused struct usb_ext_cap_descriptor fsg_ext_cap_desc = {
+	.bLength =		USB_DT_USB_EXT_CAP_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE_CAPABILITY,
+	.bDevCapabilityType =	USB_CAP_TYPE_EXT,
+
+	.bmAttributes =		cpu_to_le32(USB_LPM_SUPPORT),
+};
+
+static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
+	.bLength =		USB_DT_USB_SS_CAP_SIZE,
+	.bDescriptorType =	USB_DT_DEVICE_CAPABILITY,
+	.bDevCapabilityType =	USB_SS_CAP_TYPE,
+
+	/* .bmAttributes = LTM is not supported yet */
+
+	.wSpeedSupported =	cpu_to_le16(USB_LOW_SPEED_OPERATION
+		| USB_FULL_SPEED_OPERATION
+		| USB_HIGH_SPEED_OPERATION
+		| USB_5GBPS_OPERATION),
+	.bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
+	.bU1devExitLat =	USB_DEFAULT_U1_DEV_EXIT_LAT,
+	.bU2DevExitLat =	cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
+};
+
+static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
+	.bLength =		USB_DT_BOS_SIZE,
+	.bDescriptorType =	USB_DT_BOS,
+
+	.wTotalLength =		cpu_to_le16(USB_DT_BOS_SIZE
+				+ USB_DT_USB_EXT_CAP_SIZE
+				+ USB_DT_USB_SS_CAP_SIZE),
+
+	.bNumDeviceCaps =	2,
+};
+
+static struct usb_descriptor_header *fsg_ss_function[] = {
+#ifndef FSG_NO_OTG
+	(struct usb_descriptor_header *) &fsg_otg_desc,
+#endif
+	(struct usb_descriptor_header *) &fsg_intf_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &fsg_ss_bulk_out_comp_desc,
+#ifndef FSG_NO_INTR_EP
+	(struct usb_descriptor_header *) &fsg_ss_intr_in_desc,
+	(struct usb_descriptor_header *) &fsg_ss_intr_in_comp_desc,
+#endif
+	NULL,
+};
+
+/* Maxpacket and other transfer characteristics vary by speed. */
+static __maybe_unused struct usb_endpoint_descriptor *
+fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
+		struct usb_endpoint_descriptor *hs,
+		struct usb_endpoint_descriptor *ss)
+{
+	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
+		return ss;
+	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
+		return hs;
+	return fs;
+}
+
+
+/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
+static struct usb_string		fsg_strings[] = {
+#ifndef FSG_NO_DEVICE_STRINGS
+	{FSG_STRING_MANUFACTURER,	fsg_string_manufacturer},
+	{FSG_STRING_PRODUCT,		fsg_string_product},
+	{FSG_STRING_SERIAL,		""},
+	{FSG_STRING_CONFIG,		fsg_string_config},
+#endif
+	{FSG_STRING_INTERFACE,		fsg_string_interface},
+	{}
+};
+
+static struct usb_gadget_strings	fsg_stringtab = {
+	.language	= 0x0409,		/* en-us */
+	.strings	= fsg_strings,
+};
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * If the next two routines are called while the gadget is registered,
+ * the caller must own fsg->filesem for writing.
+ */
+
+static void fsg_lun_close(struct fsg_lun *curlun)
+{
+	if (curlun->filp) {
+		LDBG(curlun, "close backing file\n");
+		fput(curlun->filp);
+		curlun->filp = NULL;
+	}
+}
+
+
+static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
+{
+	int				ro;
+	struct file			*filp = NULL;
+	int				rc = -EINVAL;
+	struct inode			*inode = NULL;
+	loff_t				size;
+	loff_t				num_sectors;
+	loff_t				min_sectors;
+	unsigned int			blkbits;
+	unsigned int			blksize;
+
+	/* R/W if we can, R/O if we must */
+	ro = curlun->initially_ro;
+	if (!ro) {
+		filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
+		if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
+			ro = 1;
+	}
+	if (ro)
+		filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
+	if (IS_ERR(filp)) {
+		LINFO(curlun, "unable to open backing file: %s\n", filename);
+		return PTR_ERR(filp);
+	}
+
+	if (!(filp->f_mode & FMODE_WRITE))
+		ro = 1;
+
+	inode = filp->f_path.dentry->d_inode;
+	if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
+		LINFO(curlun, "invalid file type: %s\n", filename);
+		goto out;
+	}
+
+	/*
+	 * If we can't read the file, it's no good.
+	 * If we can't write the file, use it read-only.
+	 */
+	if (!(filp->f_op->read || filp->f_op->aio_read)) {
+		LINFO(curlun, "file not readable: %s\n", filename);
+		goto out;
+	}
+	if (!(filp->f_op->write || filp->f_op->aio_write))
+		ro = 1;
+
+	size = i_size_read(inode->i_mapping->host);
+	if (size < 0) {
+		LINFO(curlun, "unable to find file size: %s\n", filename);
+		rc = (int) size;
+		goto out;
+	}
+
+	if (curlun->cdrom) {
+		blksize = 2048;
+		blkbits = 11;
+	} else if (inode->i_bdev) {
+		blksize = bdev_logical_block_size(inode->i_bdev);
+		blkbits = blksize_bits(blksize);
+	} else {
+		blksize = 512;
+		blkbits = 9;
+	}
+
+	num_sectors = size >> blkbits; /* File size in logical-block-size blocks */
+	min_sectors = 1;
+	if (curlun->cdrom) {
+		min_sectors = 300;	/* Smallest track is 300 frames */
+		if (num_sectors >= 256*60*75) {
+			num_sectors = 256*60*75 - 1;
+			LINFO(curlun, "file too big: %s\n", filename);
+			LINFO(curlun, "using only first %d blocks\n",
+					(int) num_sectors);
+		}
+	}
+	if (num_sectors < min_sectors) {
+		LINFO(curlun, "file too small: %s\n", filename);
+		rc = -ETOOSMALL;
+		goto out;
+	}
+
+	if (fsg_lun_is_open(curlun))
+		fsg_lun_close(curlun);
+
+	curlun->blksize = blksize;
+	curlun->blkbits = blkbits;
+	curlun->ro = ro;
+	curlun->filp = filp;
+	curlun->file_length = size;
+	curlun->num_sectors = num_sectors;
+	LDBG(curlun, "open backing file: %s\n", filename);
+	return 0;
+
+out:
+	fput(filp);
+	return rc;
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Sync the file data, don't bother with the metadata.
+ * This code was copied from fs/buffer.c:sys_fdatasync().
+ */
+static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
+{
+	struct file	*filp = curlun->filp;
+
+	if (curlun->ro || !filp)
+		return 0;
+	return vfs_fsync(filp, 1);
+}
+
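+/*
+ * Example: with msf set, a logical address of 0 encodes as 00:02:00
+ * (minute 0, second 2, frame 0) because the two-second lead-in is
+ * added before splitting into minutes/seconds/frames.
+ */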
+static void store_cdrom_address(u8 *dest, int msf, u32 addr)
+{
+	if (msf) {
+		/* Convert to Minutes-Seconds-Frames */
+		addr >>= 2;		/* Convert to 2048-byte frames */
+		addr += 2*75;		/* Lead-in occupies 2 seconds */
+		dest[3] = addr % 75;	/* Frames */
+		addr /= 75;
+		dest[2] = addr % 60;	/* Seconds */
+		addr /= 60;
+		dest[1] = addr;		/* Minutes */
+		dest[0] = 0;		/* Reserved */
+	} else {
+		/* Absolute sector */
+		put_unaligned_be32(addr, dest);
+	}
+}
+
+
+/*-------------------------------------------------------------------------*/
+
+
+static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
+				  ? curlun->ro
+				  : curlun->initially_ro);
+}
+
+static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr,
+			      char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+
+	return sprintf(buf, "%u\n", curlun->nofua);
+}
+
+static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	char		*p;
+	ssize_t		rc;
+
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {	/* Get the complete pathname */
+		p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
+		if (IS_ERR(p))
+			rc = PTR_ERR(p);
+		else {
+			rc = strlen(p);
+			memmove(buf, p, rc);
+			buf[rc] = '\n';		/* Add a newline */
+			buf[++rc] = 0;
+		}
+	} else {				/* No file, return 0 bytes */
+		*buf = 0;
+		rc = 0;
+	}
+	up_read(filesem);
+	return rc;
+}
+
+
+static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	ssize_t		rc;
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	unsigned	ro;
+
+	rc = kstrtouint(buf, 2, &ro);
+	if (rc)
+		return rc;
+
+	/*
+	 * Allow the write-enable status to change only while the
+	 * backing file is closed.
+	 */
+	down_read(filesem);
+	if (fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "read-only status change prevented\n");
+		rc = -EBUSY;
+	} else {
+		curlun->ro = ro;
+		curlun->initially_ro = ro;
+		LDBG(curlun, "read-only status set to %d\n", curlun->ro);
+		rc = count;
+	}
+	up_read(filesem);
+	return rc;
+}
+
+static ssize_t fsg_store_nofua(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	unsigned	nofua;
+	int		ret;
+
+	ret = kstrtouint(buf, 2, &nofua);
+	if (ret)
+		return ret;
+
+	/* Sync data when switching from async mode to sync */
+	if (!nofua && curlun->nofua)
+		fsg_lun_fsync_sub(curlun);
+
+	curlun->nofua = nofua;
+
+	return count;
+}
+
+static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct fsg_lun	*curlun = fsg_lun_from_dev(dev);
+	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
+	int		rc = 0;
+
+	if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
+		LDBG(curlun, "eject attempt prevented\n");
+		return -EBUSY;				/* "Door is locked" */
+	}
+
+	/* Remove a trailing newline */
+	if (count > 0 && buf[count-1] == '\n')
+		((char *) buf)[count-1] = 0;		/* Ugh! */
+
+	/* Load new medium */
+	down_write(filesem);
+	if (count > 0 && buf[0]) {
+		/* fsg_lun_open() will close existing file if any. */
+		rc = fsg_lun_open(curlun, buf);
+		if (rc == 0)
+			curlun->unit_attention_data =
+					SS_NOT_READY_TO_READY_TRANSITION;
+	} else if (fsg_lun_is_open(curlun)) {
+		fsg_lun_close(curlun);
+		curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+	}
+	up_write(filesem);
+	return (rc < 0 ? rc : count);
+}
diff --git a/drivers/staging/ccg/u_ether.c b/drivers/staging/ccg/u_ether.c
new file mode 100644
index 0000000..1154a99
--- /dev/null
+++ b/drivers/staging/ccg/u_ether.c
@@ -0,0 +1,986 @@
+/*
+ * u_ether.c -- Ethernet-over-USB link layer utilities for Gadget stack
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "u_ether.h"
+
+
+/*
+ * This component encapsulates the Ethernet link glue needed to provide
+ * one (!) network link through the USB gadget stack, normally "usb0".
+ *
+ * The control and data models are handled by the function driver which
+ * connects to this code; such as CDC Ethernet (ECM or EEM),
+ * "CDC Subset", or RNDIS.  That includes all descriptor and endpoint
+ * management.
+ *
+ * Link level addressing is handled by this component using module
+ * parameters; if no such parameters are provided, random link level
+ * addresses are used.  Each end of the link uses one address.  The
+ * host end address is exported in various ways, and is often recorded
+ * in configuration databases.
+ *
+ * The driver which assembles each configuration using such a link is
+ * responsible for ensuring that each configuration includes at most one
+ * instance of this network link.  (The network layer provides ways for
+ * this single "physical" link to be used by multiple virtual links.)
+ */
+
+#define UETH__VERSION	"29-May-2008"
+
+struct eth_dev {
+	/* lock is held while accessing port_usb
+	 * or updating its backlink port_usb->ioport
+	 */
+	spinlock_t		lock;
+	struct gether		*port_usb;
+
+	struct net_device	*net;
+	struct usb_gadget	*gadget;
+
+	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
+	struct list_head	tx_reqs, rx_reqs;
+	atomic_t		tx_qlen;
+
+	struct sk_buff_head	rx_frames;
+
+	unsigned		header_len;
+	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
+	int			(*unwrap)(struct gether *,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	struct work_struct	work;
+
+	unsigned long		todo;
+#define	WORK_RX_MEMORY		0
+
+	bool			zlp;
+	u8			host_mac[ETH_ALEN];
+};
+
+/*-------------------------------------------------------------------------*/
+
+#define RX_EXTRA	20	/* bytes guarding against rx overflows */
+
+#define DEFAULT_QLEN	2	/* double buffering by default */
+
+static unsigned qmult = 5;
+module_param(qmult, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(qmult, "queue length multiplier at high/super speed");
+
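+/* e.g. the default qmult of 5 gives 5 * DEFAULT_QLEN == 10 requests per
+ * direction at high/super speed, versus 2 at full speed
+ */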
+/* for dual-speed hardware, use deeper queues at high/super speed */
+static inline int qlen(struct usb_gadget *gadget)
+{
+	if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH ||
+					    gadget->speed == USB_SPEED_SUPER))
+		return qmult * DEFAULT_QLEN;
+	else
+		return DEFAULT_QLEN;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* REVISIT there must be a better way than having two sets
+ * of debug calls ...
+ */
+
+#undef DBG
+#undef VDBG
+#undef ERROR
+#undef INFO
+
+#define xprintk(d, level, fmt, args...) \
+	printk(level "%s: " fmt , (d)->net->name , ## args)
+
+#ifdef DEBUG
+#undef DEBUG
+#define DBG(dev, fmt, args...) \
+	xprintk(dev , KERN_DEBUG , fmt , ## args)
+#else
+#define DBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* DEBUG */
+
+#ifdef VERBOSE_DEBUG
+#define VDBG	DBG
+#else
+#define VDBG(dev, fmt, args...) \
+	do { } while (0)
+#endif /* VERBOSE_DEBUG */
+
+#define ERROR(dev, fmt, args...) \
+	xprintk(dev , KERN_ERR , fmt , ## args)
+#define INFO(dev, fmt, args...) \
+	xprintk(dev , KERN_INFO , fmt , ## args)
+
+/*-------------------------------------------------------------------------*/
+
+/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
+
+static int ueth_change_mtu(struct net_device *net, int new_mtu)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+	int		status = 0;
+
+	/* don't change MTU on "live" link (peer won't know) */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		status = -EBUSY;
+	else if (new_mtu <= ETH_HLEN || new_mtu > ETH_FRAME_LEN)
+		status = -ERANGE;
+	else
+		net->mtu = new_mtu;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return status;
+}
+
+static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+
+	strlcpy(p->driver, "g_ether", sizeof p->driver);
+	strlcpy(p->version, UETH__VERSION, sizeof p->version);
+	strlcpy(p->fw_version, dev->gadget->name, sizeof p->fw_version);
+	strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+}
+
+/* REVISIT can also support:
+ *   - WOL (by tracking suspends and issuing remote wakeup)
+ *   - msglevel (implies updated messaging)
+ *   - ... probably more ethtool ops
+ */
+
+static const struct ethtool_ops ops = {
+	.get_drvinfo = eth_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+};
+
+static void defer_kevent(struct eth_dev *dev, int flag)
+{
+	if (test_and_set_bit(flag, &dev->todo))
+		return;
+	if (!schedule_work(&dev->work))
+		ERROR(dev, "kevent %d may have been dropped\n", flag);
+	else
+		DBG(dev, "kevent %d scheduled\n", flag);
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req);
+
+static int
+rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
+{
+	struct sk_buff	*skb;
+	int		retval = -ENOMEM;
+	size_t		size = 0;
+	struct usb_ep	*out;
+	unsigned long	flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb)
+		out = dev->port_usb->out_ep;
+	else
+		out = NULL;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!out)
+		return -ENOTCONN;
+
+
+	/* Padding up to RX_EXTRA handles minor disagreements with host.
+	 * Normally we use the USB "terminate on short read" convention;
+	 * so allow up to (N*maxpacket), since that memory is normally
+	 * already allocated.  Some hardware doesn't deal well with short
+	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
+	 * byte off the end (to force hardware errors on overflow).
+	 *
+	 * RNDIS uses internal framing, and explicitly allows senders to
+	 * pad to end-of-packet.  That's potentially nice for speed, but
+	 * means receivers can't recover lost synch on their own (because
+	 * new packets don't only start after a short RX).
+	 */
+	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
+	size += dev->port_usb->header_len;
+	size += out->maxpacket - 1;
+	size -= size % out->maxpacket;
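+	/* the two lines above round size up to a multiple of out->maxpacket */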
+
+	if (dev->port_usb->is_fixed)
+		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
+
+	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
+	if (skb == NULL) {
+		DBG(dev, "no rx skb\n");
+		goto enomem;
+	}
+
+	/* Some platforms perform better when IP packets are aligned,
+	 * but on at least one, checksumming fails otherwise.  Note:
+	 * RNDIS headers involve variable numbers of LE32 values.
+	 */
+	skb_reserve(skb, NET_IP_ALIGN);
+
+	req->buf = skb->data;
+	req->length = size;
+	req->complete = rx_complete;
+	req->context = skb;
+
+	retval = usb_ep_queue(out, req, gfp_flags);
+	if (retval == -ENOMEM)
+enomem:
+		defer_kevent(dev, WORK_RX_MEMORY);
+	if (retval) {
+		DBG(dev, "rx submit --> %d\n", retval);
+		if (skb)
+			dev_kfree_skb_any(skb);
+		spin_lock_irqsave(&dev->req_lock, flags);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return retval;
+}
+
+static void rx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context, *skb2;
+	struct eth_dev	*dev = ep->driver_data;
+	int		status = req->status;
+
+	switch (status) {
+
+	/* normal completion */
+	case 0:
+		skb_put(skb, req->actual);
+
+		if (dev->unwrap) {
+			unsigned long	flags;
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->port_usb) {
+				status = dev->unwrap(dev->port_usb,
+							skb,
+							&dev->rx_frames);
+			} else {
+				dev_kfree_skb_any(skb);
+				status = -ENOTCONN;
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+		} else {
+			skb_queue_tail(&dev->rx_frames, skb);
+		}
+		skb = NULL;
+
+		skb2 = skb_dequeue(&dev->rx_frames);
+		while (skb2) {
+			if (status < 0
+					|| ETH_HLEN > skb2->len
+					|| skb2->len > ETH_FRAME_LEN) {
+				dev->net->stats.rx_errors++;
+				dev->net->stats.rx_length_errors++;
+				DBG(dev, "rx length %d\n", skb2->len);
+				dev_kfree_skb_any(skb2);
+				goto next_frame;
+			}
+			skb2->protocol = eth_type_trans(skb2, dev->net);
+			dev->net->stats.rx_packets++;
+			dev->net->stats.rx_bytes += skb2->len;
+
+			/* no buffer copies needed, unless hardware can't
+			 * use skb buffers.
+			 */
+			status = netif_rx(skb2);
+next_frame:
+			skb2 = skb_dequeue(&dev->rx_frames);
+		}
+		break;
+
+	/* software-driven interface shutdown */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		VDBG(dev, "rx shutdown, code %d\n", status);
+		goto quiesce;
+
+	/* for hardware automagic (such as pxa) */
+	case -ECONNABORTED:		/* endpoint reset */
+		DBG(dev, "rx %s reset\n", ep->name);
+		defer_kevent(dev, WORK_RX_MEMORY);
+quiesce:
+		dev_kfree_skb_any(skb);
+		goto clean;
+
+	/* data overrun */
+	case -EOVERFLOW:
+		dev->net->stats.rx_over_errors++;
+		/* FALLTHROUGH */
+
+	default:
+		dev->net->stats.rx_errors++;
+		DBG(dev, "rx status %d\n", status);
+		break;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	if (!netif_running(dev->net)) {
+clean:
+		spin_lock(&dev->req_lock);
+		list_add(&req->list, &dev->rx_reqs);
+		spin_unlock(&dev->req_lock);
+		req = NULL;
+	}
+	if (req)
+		rx_submit(dev, req, GFP_ATOMIC);
+}
+
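+/*
+ * Top up @list so it holds @n requests for @ep, freeing any surplus
+ * entries.  Returns 0 as long as at least one request is available,
+ * -ENOMEM when @n is zero or nothing could be allocated.
+ */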
+static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
+{
+	unsigned		i;
+	struct usb_request	*req;
+
+	if (!n)
+		return -ENOMEM;
+
+	/* queue/recycle up to N requests */
+	i = n;
+	list_for_each_entry(req, list, list) {
+		if (i-- == 0)
+			goto extra;
+	}
+	while (i--) {
+		req = usb_ep_alloc_request(ep, GFP_ATOMIC);
+		if (!req)
+			return list_empty(list) ? -ENOMEM : 0;
+		list_add(&req->list, list);
+	}
+	return 0;
+
+extra:
+	/* free extras */
+	for (;;) {
+		struct list_head	*next;
+
+		next = req->list.next;
+		list_del(&req->list);
+		usb_ep_free_request(ep, req);
+
+		if (next == list)
+			break;
+
+		req = container_of(next, struct usb_request, list);
+	}
+	return 0;
+}
+
+static int alloc_requests(struct eth_dev *dev, struct gether *link, unsigned n)
+{
+	int	status;
+
+	spin_lock(&dev->req_lock);
+	status = prealloc(&dev->tx_reqs, link->in_ep, n);
+	if (status < 0)
+		goto fail;
+	status = prealloc(&dev->rx_reqs, link->out_ep, n);
+	if (status < 0)
+		goto fail;
+	goto done;
+fail:
+	DBG(dev, "can't alloc requests\n");
+done:
+	spin_unlock(&dev->req_lock);
+	return status;
+}
+
+static void rx_fill(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	struct usb_request	*req;
+	unsigned long		flags;
+
+	/* fill unused rxq slots with some skb */
+	spin_lock_irqsave(&dev->req_lock, flags);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+				struct usb_request, list);
+		list_del_init(&req->list);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		if (rx_submit(dev, req, gfp_flags) < 0) {
+			defer_kevent(dev, WORK_RX_MEMORY);
+			return;
+		}
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+	}
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+}
+
+static void eth_work(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
+
+	if (test_and_clear_bit(WORK_RX_MEMORY, &dev->todo)) {
+		if (netif_running(dev->net))
+			rx_fill(dev, GFP_KERNEL);
+	}
+
+	if (dev->todo)
+		DBG(dev, "work done, flags = 0x%lx\n", dev->todo);
+}
+
+static void tx_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct sk_buff	*skb = req->context;
+	struct eth_dev	*dev = ep->driver_data;
+
+	switch (req->status) {
+	default:
+		dev->net->stats.tx_errors++;
+		VDBG(dev, "tx err %d\n", req->status);
+		/* FALLTHROUGH */
+	case -ECONNRESET:		/* unlink */
+	case -ESHUTDOWN:		/* disconnect etc */
+		break;
+	case 0:
+		dev->net->stats.tx_bytes += skb->len;
+	}
+	dev->net->stats.tx_packets++;
+
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->tx_reqs);
+	spin_unlock(&dev->req_lock);
+	dev_kfree_skb_any(skb);
+
+	atomic_dec(&dev->tx_qlen);
+	if (netif_carrier_ok(dev->net))
+		netif_wake_queue(dev->net);
+}
+
+static inline int is_promisc(u16 cdc_filter)
+{
+	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
+}
+
+static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	struct eth_dev		*dev = netdev_priv(net);
+	int			length = skb->len;
+	int			retval;
+	struct usb_request	*req = NULL;
+	unsigned long		flags;
+	struct usb_ep		*in;
+	u16			cdc_filter;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		in = dev->port_usb->in_ep;
+		cdc_filter = dev->port_usb->cdc_filter;
+	} else {
+		in = NULL;
+		cdc_filter = 0;
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (!in) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* apply outgoing CDC or RNDIS filters */
+	if (!is_promisc(cdc_filter)) {
+		u8		*dest = skb->data;
+
+		if (is_multicast_ether_addr(dest)) {
+			u16	type;
+
+			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
+			 * SET_ETHERNET_MULTICAST_FILTERS requests
+			 */
+			if (is_broadcast_ether_addr(dest))
+				type = USB_CDC_PACKET_TYPE_BROADCAST;
+			else
+				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
+			if (!(cdc_filter & type)) {
+				dev_kfree_skb_any(skb);
+				return NETDEV_TX_OK;
+			}
+		}
+		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
+	}
+
+	spin_lock_irqsave(&dev->req_lock, flags);
+	/*
+	 * this freelist can be empty if an interrupt triggered disconnect()
+	 * and reconfigured the gadget (shutting down this queue) after the
+	 * network stack decided to xmit but before we got the spinlock.
+	 */
+	if (list_empty(&dev->tx_reqs)) {
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	req = container_of(dev->tx_reqs.next, struct usb_request, list);
+	list_del(&req->list);
+
+	/* temporarily stop TX queue when the freelist empties */
+	if (list_empty(&dev->tx_reqs))
+		netif_stop_queue(net);
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	/* No buffer copies are needed, unless the network stack did one
+	 * already, the hardware can't use skb buffers, or there isn't
+	 * enough space for the extra headers we need.
+	 */
+	if (dev->wrap) {
+		unsigned long	flags;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		if (dev->port_usb)
+			skb = dev->wrap(dev->port_usb, skb);
+		spin_unlock_irqrestore(&dev->lock, flags);
+		if (!skb)
+			goto drop;
+
+		length = skb->len;
+	}
+	req->buf = skb->data;
+	req->context = skb;
+	req->complete = tx_complete;
+
+	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
+	if (dev->port_usb->is_fixed &&
+	    length == dev->port_usb->fixed_in_len &&
+	    (length % in->maxpacket) == 0)
+		req->zero = 0;
+	else
+		req->zero = 1;
+
+	/* Use zlp framing on tx for strict CDC-Ether conformance,
+	 * though any robust network rx path ignores extra padding;
+	 * some hardware doesn't like to write zlps.
+	 */
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+		length++;
+
+	req->length = length;
+
+	/* throttle high/super speed IRQ rate back slightly */
+	if (gadget_is_dualspeed(dev->gadget))
+		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
+				     dev->gadget->speed == USB_SPEED_SUPER)
+			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
+			: 0;
+
+	retval = usb_ep_queue(in, req, GFP_ATOMIC);
+	switch (retval) {
+	default:
+		DBG(dev, "tx queue err %d\n", retval);
+		break;
+	case 0:
+		net->trans_start = jiffies;
+		atomic_inc(&dev->tx_qlen);
+	}
+
+	if (retval) {
+		dev_kfree_skb_any(skb);
+drop:
+		dev->net->stats.tx_dropped++;
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (list_empty(&dev->tx_reqs))
+			netif_start_queue(net);
+		list_add(&req->list, &dev->tx_reqs);
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+	}
+	return NETDEV_TX_OK;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void eth_start(struct eth_dev *dev, gfp_t gfp_flags)
+{
+	DBG(dev, "%s\n", __func__);
+
+	/* fill the rx queue */
+	rx_fill(dev, gfp_flags);
+
+	/* and open the tx floodgates */
+	atomic_set(&dev->tx_qlen, 0);
+	netif_wake_queue(dev->net);
+}
+
+static int eth_open(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	struct gether	*link;
+
+	DBG(dev, "%s\n", __func__);
+	if (netif_carrier_ok(dev->net))
+		eth_start(dev, GFP_KERNEL);
+
+	spin_lock_irq(&dev->lock);
+	link = dev->port_usb;
+	if (link && link->open)
+		link->open(link);
+	spin_unlock_irq(&dev->lock);
+
+	return 0;
+}
+
+static int eth_stop(struct net_device *net)
+{
+	struct eth_dev	*dev = netdev_priv(net);
+	unsigned long	flags;
+
+	VDBG(dev, "%s\n", __func__);
+	netif_stop_queue(net);
+
+	DBG(dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+		dev->net->stats.rx_packets, dev->net->stats.tx_packets,
+		dev->net->stats.rx_errors, dev->net->stats.tx_errors
+		);
+
+	/* ensure there are no more active requests */
+	spin_lock_irqsave(&dev->lock, flags);
+	if (dev->port_usb) {
+		struct gether	*link = dev->port_usb;
+
+		if (link->close)
+			link->close(link);
+
+		/* NOTE:  we have no abort-queue primitive we could use
+		 * to cancel all pending I/O.  Instead, we disable then
+		 * reenable the endpoints ... this idiom may leave toggle
+		 * wrong, but that's a self-correcting error.
+		 *
+		 * REVISIT:  we *COULD* just let the transfers complete at
+		 * their own pace; the network stack can handle old packets.
+		 * For the moment we leave this here, since it works.
+		 */
+		usb_ep_disable(link->in_ep);
+		usb_ep_disable(link->out_ep);
+		if (netif_carrier_ok(net)) {
+			DBG(dev, "host still using in/out endpoints\n");
+			usb_ep_enable(link->in_ep);
+			usb_ep_enable(link->out_ep);
+		}
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* initial value, changed by "ifconfig usb0 hw ether xx:xx:xx:xx:xx:xx" */
+static char *dev_addr;
+module_param(dev_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(dev_addr, "Device Ethernet Address");
+
+/* this address is invisible to ifconfig */
+static char *host_addr;
+module_param(host_addr, charp, S_IRUGO);
+MODULE_PARM_DESC(host_addr, "Host Ethernet Address");
+
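+/*
+ * Parse a MAC address given as "xx:xx:xx:xx:xx:xx" (':' or '.' separated);
+ * fall back to a random locally administered address and return 1 if the
+ * string is missing or does not yield a valid address.
+ */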
+static int get_ether_addr(const char *str, u8 *dev_addr)
+{
+	if (str) {
+		unsigned	i;
+
+		for (i = 0; i < 6; i++) {
+			unsigned char num;
+
+			if ((*str == '.') || (*str == ':'))
+				str++;
+			num = hex_to_bin(*str++) << 4;
+			num |= hex_to_bin(*str++);
+			dev_addr [i] = num;
+		}
+		if (is_valid_ether_addr(dev_addr))
+			return 0;
+	}
+	eth_random_addr(dev_addr);
+	return 1;
+}
+
+static struct eth_dev *the_dev;
+
+static const struct net_device_ops eth_netdev_ops = {
+	.ndo_open		= eth_open,
+	.ndo_stop		= eth_stop,
+	.ndo_start_xmit		= eth_start_xmit,
+	.ndo_change_mtu		= ueth_change_mtu,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static struct device_type gadget_type = {
+	.name	= "gadget",
+};
+
+/**
+ * gether_setup_name - initialize one ethernet-over-usb link
+ * @g: gadget to associate with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * @netname: name for network device (for example, "usb")
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname)
+{
+	struct eth_dev		*dev;
+	struct net_device	*net;
+	int			status;
+
+	if (the_dev)
+		return -EBUSY;
+
+	net = alloc_etherdev(sizeof *dev);
+	if (!net)
+		return -ENOMEM;
+
+	dev = netdev_priv(net);
+	spin_lock_init(&dev->lock);
+	spin_lock_init(&dev->req_lock);
+	INIT_WORK(&dev->work, eth_work);
+	INIT_LIST_HEAD(&dev->tx_reqs);
+	INIT_LIST_HEAD(&dev->rx_reqs);
+
+	skb_queue_head_init(&dev->rx_frames);
+
+	/* network device setup */
+	dev->net = net;
+	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+
+	if (get_ether_addr(dev_addr, net->dev_addr))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "self");
+	if (get_ether_addr(host_addr, dev->host_mac))
+		dev_warn(&g->dev,
+			"using random %s ethernet address\n", "host");
+
+	if (ethaddr)
+		memcpy(ethaddr, dev->host_mac, ETH_ALEN);
+
+	net->netdev_ops = &eth_netdev_ops;
+
+	SET_ETHTOOL_OPS(net, &ops);
+
+	dev->gadget = g;
+	SET_NETDEV_DEV(net, &g->dev);
+	SET_NETDEV_DEVTYPE(net, &gadget_type);
+
+	status = register_netdev(net);
+	if (status < 0) {
+		dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
+		free_netdev(net);
+	} else {
+		INFO(dev, "MAC %pM\n", net->dev_addr);
+		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+		the_dev = dev;
+
+		/* two kinds of host-initiated state changes:
+		 *  - iff DATA transfer is active, carrier is "on"
+		 *  - tx queueing enabled if open *and* carrier is "on"
+		 */
+		netif_carrier_off(net);
+	}
+
+	return status;
+}
+
+/**
+ * gether_cleanup - remove Ethernet-over-USB device
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gether_setup().
+ */
+void gether_cleanup(void)
+{
+	if (!the_dev)
+		return;
+
+	unregister_netdev(the_dev->net);
+	flush_work_sync(&the_dev->work);
+	free_netdev(the_dev->net);
+
+	the_dev = NULL;
+}
+
+
+/**
+ * gether_connect - notify network layer that USB link is active
+ * @link: the USB link, set up with endpoints, descriptors matching
+ *	current device speed, and any framing wrapper(s) set up.
+ * Context: irqs blocked
+ *
+ * This is called to activate endpoints and let the network layer know
+ * the connection is active ("carrier detect").  It may cause the I/O
+ * queues to open and start letting network packets flow, but will in
+ * any case activate the endpoints so that they respond properly to the
+ * USB host.
+ *
+ * Verify the returned net_device pointer using IS_ERR().  If it doesn't
+ * indicate an error code (a negative errno), the ep->driver_data values
+ * have been overwritten.
+ */
+struct net_device *gether_connect(struct gether *link)
+{
+	struct eth_dev		*dev = the_dev;
+	int			result = 0;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	link->in_ep->driver_data = dev;
+	result = usb_ep_enable(link->in_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n",
+			link->in_ep->name, result);
+		goto fail0;
+	}
+
+	link->out_ep->driver_data = dev;
+	result = usb_ep_enable(link->out_ep);
+	if (result != 0) {
+		DBG(dev, "enable %s --> %d\n",
+			link->out_ep->name, result);
+		goto fail1;
+	}
+
+	if (result == 0)
+		result = alloc_requests(dev, link, qlen(dev->gadget));
+
+	if (result == 0) {
+		dev->zlp = link->is_zlp_ok;
+		DBG(dev, "qlen %d\n", qlen(dev->gadget));
+
+		dev->header_len = link->header_len;
+		dev->unwrap = link->unwrap;
+		dev->wrap = link->wrap;
+
+		spin_lock(&dev->lock);
+		dev->port_usb = link;
+		link->ioport = dev;
+		if (netif_running(dev->net)) {
+			if (link->open)
+				link->open(link);
+		} else {
+			if (link->close)
+				link->close(link);
+		}
+		spin_unlock(&dev->lock);
+
+		netif_carrier_on(dev->net);
+		if (netif_running(dev->net))
+			eth_start(dev, GFP_ATOMIC);
+
+	/* on error, disable any endpoints  */
+	} else {
+		(void) usb_ep_disable(link->out_ep);
+fail1:
+		(void) usb_ep_disable(link->in_ep);
+	}
+fail0:
+	/* caller is responsible for cleanup on error */
+	if (result < 0)
+		return ERR_PTR(result);
+	return dev->net;
+}
+
+/**
+ * gether_disconnect - notify network layer that USB link is inactive
+ * @link: the USB link, on which gether_connect() was called
+ * Context: irqs blocked
+ *
+ * This is called to deactivate endpoints and let the network layer know
+ * the connection went inactive ("no carrier").
+ *
+ * On return, the state is as if gether_connect() had never been called.
+ * The endpoints are inactive, and accordingly without active USB I/O.
+ * Pointers to endpoint descriptors and endpoint private data are nulled.
+ */
+void gether_disconnect(struct gether *link)
+{
+	struct eth_dev		*dev = link->ioport;
+	struct usb_request	*req;
+
+	WARN_ON(!dev);
+	if (!dev)
+		return;
+
+	DBG(dev, "%s\n", __func__);
+
+	netif_stop_queue(dev->net);
+	netif_carrier_off(dev->net);
+
+	/* disable endpoints, forcing (synchronous) completion
+	 * of all pending i/o.  then free the request objects
+	 * and forget about the endpoints.
+	 */
+	usb_ep_disable(link->in_ep);
+	spin_lock(&dev->req_lock);
+	while (!list_empty(&dev->tx_reqs)) {
+		req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock(&dev->req_lock);
+		usb_ep_free_request(link->in_ep, req);
+		spin_lock(&dev->req_lock);
+	}
+	spin_unlock(&dev->req_lock);
+	link->in_ep->driver_data = NULL;
+	link->in_ep->desc = NULL;
+
+	usb_ep_disable(link->out_ep);
+	spin_lock(&dev->req_lock);
+	while (!list_empty(&dev->rx_reqs)) {
+		req = container_of(dev->rx_reqs.next,
+					struct usb_request, list);
+		list_del(&req->list);
+
+		spin_unlock(&dev->req_lock);
+		usb_ep_free_request(link->out_ep, req);
+		spin_lock(&dev->req_lock);
+	}
+	spin_unlock(&dev->req_lock);
+	link->out_ep->driver_data = NULL;
+	link->out_ep->desc = NULL;
+
+	/* finish forgetting about this USB link episode */
+	dev->header_len = 0;
+	dev->unwrap = NULL;
+	dev->wrap = NULL;
+
+	spin_lock(&dev->lock);
+	dev->port_usb = NULL;
+	link->ioport = NULL;
+	spin_unlock(&dev->lock);
+}
diff --git a/drivers/staging/ccg/u_ether.h b/drivers/staging/ccg/u_ether.h
new file mode 100644
index 0000000..6f4a162
--- /dev/null
+++ b/drivers/staging/ccg/u_ether.h
@@ -0,0 +1,154 @@
+/*
+ * u_ether.h -- interface to USB gadget "ethernet link" utilities
+ *
+ * Copyright (C) 2003-2005,2008 David Brownell
+ * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __U_ETHER_H
+#define __U_ETHER_H
+
+#include <linux/err.h>
+#include <linux/if_ether.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+#include "gadget_chips.h"
+
+
+/*
+ * This represents the USB side of an "ethernet" link, managed by a USB
+ * function which provides control and (maybe) framing.  Two functions
+ * in different configurations could share the same ethernet link/netdev,
+ * using different host interaction models.
+ *
+ * There is a current limitation that only one instance of this link may
+ * be present in any given configuration.  When that's a problem, network
+ * layer facilities can be used to package multiple logical links on this
+ * single "physical" one.
+ */
+struct gether {
+	struct usb_function		func;
+
+	/* updated by gether_{connect,disconnect} */
+	struct eth_dev			*ioport;
+
+	/* endpoints handle full and/or high speeds */
+	struct usb_ep			*in_ep;
+	struct usb_ep			*out_ep;
+
+	bool				is_zlp_ok;
+
+	u16				cdc_filter;
+
+	/* hooks for added framing, as needed for RNDIS and EEM. */
+	u32				header_len;
+	/* NCM requires fixed size bundles */
+	bool				is_fixed;
+	u32				fixed_out_len;
+	u32				fixed_in_len;
+	struct sk_buff			*(*wrap)(struct gether *port,
+						struct sk_buff *skb);
+	int				(*unwrap)(struct gether *port,
+						struct sk_buff *skb,
+						struct sk_buff_head *list);
+
+	/* called on network open/close */
+	void				(*open)(struct gether *);
+	void				(*close)(struct gether *);
+};
+
+#define	DEFAULT_FILTER	(USB_CDC_PACKET_TYPE_BROADCAST \
+			|USB_CDC_PACKET_TYPE_ALL_MULTICAST \
+			|USB_CDC_PACKET_TYPE_PROMISCUOUS \
+			|USB_CDC_PACKET_TYPE_DIRECTED)
+
+/* variant of gether_setup that allows customizing network device name */
+int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+		const char *netname);
+
+/* netdev setup/teardown as directed by the gadget driver */
+/* gether_setup - initialize one ethernet-over-usb link
+ * @g: gadget to associate with these links
+ * @ethaddr: NULL, or a buffer in which the ethernet address of the
+ *	host side of the link is recorded
+ * Context: may sleep
+ *
+ * This sets up the single network link that may be exported by a
+ * gadget driver using this framework.  The link layer addresses are
+ * set up using module parameters.
+ *
+ * Returns negative errno, or zero on success
+ */
+static inline int gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
+{
+	return gether_setup_name(g, ethaddr, "usb");
+}
+
+void gether_cleanup(void);
+
+/* connect/disconnect is handled by individual functions */
+struct net_device *gether_connect(struct gether *);
+void gether_disconnect(struct gether *);
+
+/* Some controllers can't support CDC Ethernet (ECM) ... */
+static inline bool can_support_ecm(struct usb_gadget *gadget)
+{
+	if (!gadget_supports_altsettings(gadget))
+		return false;
+
+	/* Everything else is *presumably* fine ... but this is a bit
+	 * chancy, so be **CERTAIN** there are no hardware issues with
+	 * your controller.  Add it above if it can't handle CDC.
+	 */
+	return true;
+}
+
+/* each configuration may bind one instance of an ethernet link */
+int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int ncm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
+int eem_bind_config(struct usb_configuration *c);
+
+#ifdef USB_ETH_RNDIS
+
+int rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer);
+
+#else
+
+static inline int
+rndis_bind_config_vendor(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
+				u32 vendorID, const char *manufacturer)
+{
+	return 0;
+}
+
+#endif
+
+/**
+ * rndis_bind_config - add RNDIS network link to a configuration
+ * @c: the configuration to support the network link
+ * @ethaddr: a buffer in which the ethernet address of the host
+ *	side of the link was recorded
+ * Context: single threaded during gadget setup
+ *
+ * Returns zero on success, else negative errno.
+ *
+ * Caller must have called @gether_setup().  Caller is also responsible
+ * for calling @gether_cleanup() before module unload.
+ */
+static inline int rndis_bind_config(struct usb_configuration *c,
+				    u8 ethaddr[ETH_ALEN])
+{
+	return rndis_bind_config_vendor(c, ethaddr, 0, NULL);
+}
+
+
+#endif /* __U_ETHER_H */
diff --git a/drivers/staging/ccg/u_serial.c b/drivers/staging/ccg/u_serial.c
new file mode 100644
index 0000000..5b3f5ff
--- /dev/null
+++ b/drivers/staging/ccg/u_serial.c
@@ -0,0 +1,1341 @@
+/*
+ * u_serial.c - utilities for USB gadget "serial port"/TTY support
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers at steinerpoint.com)
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This code also borrows from usbserial.c, which is
+ * Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg at kroah.com)
+ * Copyright (C) 2000 Peter Berger (pberger at brimson.com)
+ * Copyright (C) 2000 Al Borchers (alborchers at steinerpoint.com)
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+/* #define VERBOSE_DEBUG */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "u_serial.h"
+
+
+/*
+ * This component encapsulates the TTY layer glue needed to provide basic
+ * "serial port" functionality through the USB gadget stack.  Each such
+ * port is exposed through a /dev/ttyGS* node.
+ *
+ * After initialization (gserial_setup), these TTY port devices stay
+ * available until they are removed (gserial_cleanup).  Each one may be
+ * connected to a USB function (gserial_connect), or disconnected (with
+ * gserial_disconnect) when the USB host issues a config change event.
+ * Data can only flow when the port is connected to the host.
+ *
+ * A given TTY port can be made available in multiple configurations.
+ * For example, each one might expose a ttyGS0 node which provides a
+ * login application.  In one case that might use CDC ACM interface 0,
+ * while another configuration might use interface 3 for that.  The
+ * work to handle that (including descriptor management) is not part
+ * of this component.
+ *
+ * Configurations may expose more than one TTY port.  For example, if
+ * ttyGS0 provides login service, then ttyGS1 might provide dialer access
+ * for a telephone or fax link.  And ttyGS2 might be something that just
+ * needs a simple byte stream interface for some messaging protocol that
+ * is managed in userspace ... OBEX, PTP, and MTP have been mentioned.
+ */
+
+#define PREFIX	"ttyGS"
+
+/*
+ * gserial is the lifecycle interface, used by USB functions
+ * gs_port is the I/O nexus, used by the tty driver
+ * tty_struct links to the tty/filesystem framework
+ *
+ * gserial <---> gs_port ... links will be null when the USB link is
+ * inactive; managed by gserial_{connect,disconnect}().  each gserial
+ * instance can wrap its own USB control protocol.
+ *	gserial->ioport == usb_ep->driver_data ... gs_port
+ *	gs_port->port_usb ... gserial
+ *
+ * gs_port <---> tty_struct ... links will be null when the TTY file
+ * isn't opened; managed by gs_open()/gs_close()
+ *	gserial->port_tty ... tty_struct
+ *	tty_struct->driver_data ... gserial
+ */
+
+/* RX and TX queues can buffer QUEUE_SIZE packets before they hit the
+ * next layer of buffering.  For TX that's a circular buffer; for RX
+ * consider it a NOP.  A third layer is provided by the TTY code.
+ */
+#define QUEUE_SIZE		16
+#define WRITE_BUF_SIZE		8192		/* TX only */
+
+/* circular buffer */
+struct gs_buf {
+	unsigned		buf_size;
+	char			*buf_buf;
+	char			*buf_get;
+	char			*buf_put;
+};
+
+/*
+ * The port structure holds info for each port, one for each minor number
+ * (and thus for each /dev/ node).
+ */
+struct gs_port {
+	struct tty_port		port;
+	spinlock_t		port_lock;	/* guard port_* access */
+
+	struct gserial		*port_usb;
+
+	bool			openclose;	/* open/close in progress */
+	u8			port_num;
+
+	struct list_head	read_pool;
+	int read_started;
+	int read_allocated;
+	struct list_head	read_queue;
+	unsigned		n_read;
+	struct tasklet_struct	push;
+
+	struct list_head	write_pool;
+	int write_started;
+	int write_allocated;
+	struct gs_buf		port_write_buf;
+	wait_queue_head_t	drain_wait;	/* wait while writes drain */
+
+	/* REVISIT this state ... */
+	struct usb_cdc_line_coding port_line_coding;	/* 8-N-1 etc */
+};
+
+/* increase N_PORTS if you need more */
+#define N_PORTS		4
+static struct portmaster {
+	struct mutex	lock;			/* protect open/close */
+	struct gs_port	*port;
+} ports[N_PORTS];
+static unsigned	n_ports;
+
+#define GS_CLOSE_TIMEOUT		15		/* seconds */
+
+
+
+#ifdef VERBOSE_DEBUG
+#define pr_vdebug(fmt, arg...) \
+	pr_debug(fmt, ##arg)
+#else
+#define pr_vdebug(fmt, arg...) \
+	({ if (0) pr_debug(fmt, ##arg); })
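+	/* the "if (0)" form keeps printf-style format checking
+	 * without emitting any code
+	 */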
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/* Circular Buffer */
+
+/*
+ * gs_buf_alloc
+ *
+ * Allocate a circular buffer and all associated memory.
+ */
+static int gs_buf_alloc(struct gs_buf *gb, unsigned size)
+{
+	gb->buf_buf = kmalloc(size, GFP_KERNEL);
+	if (gb->buf_buf == NULL)
+		return -ENOMEM;
+
+	gb->buf_size = size;
+	gb->buf_put = gb->buf_buf;
+	gb->buf_get = gb->buf_buf;
+
+	return 0;
+}
+
+/*
+ * gs_buf_free
+ *
+ * Free the buffer and all associated memory.
+ */
+static void gs_buf_free(struct gs_buf *gb)
+{
+	kfree(gb->buf_buf);
+	gb->buf_buf = NULL;
+}
+
+/*
+ * gs_buf_clear
+ *
+ * Clear out all data in the circular buffer.
+ */
+static void gs_buf_clear(struct gs_buf *gb)
+{
+	gb->buf_get = gb->buf_put;
+	/* equivalent to a get of all data available */
+}
+
+/*
+ * gs_buf_data_avail
+ *
+ * Return the number of bytes of data written into the circular
+ * buffer.
+ */
+static unsigned gs_buf_data_avail(struct gs_buf *gb)
+{
+	return (gb->buf_size + gb->buf_put - gb->buf_get) % gb->buf_size;
+}
+
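+/*
+ * Note: one slot of the circular buffer is kept unused, so buf_put ==
+ * buf_get always means "empty"; a full buffer leaves buf_put one byte
+ * behind buf_get, which is why space_avail subtracts 1.
+ */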
+/*
+ * gs_buf_space_avail
+ *
+ * Return the number of bytes of space available in the circular
+ * buffer.
+ */
+static unsigned gs_buf_space_avail(struct gs_buf *gb)
+{
+	return (gb->buf_size + gb->buf_get - gb->buf_put - 1) % gb->buf_size;
+}
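/*
 * Worked example (illustrative): with buf_size = 8, buf_get at offset 6
 * and buf_put at offset 2, data_avail = (8 + 2 - 6) % 8 = 4 bytes and
 * space_avail = (8 + 6 - 2 - 1) % 8 = 3 bytes.  One slot is always kept
 * free so that buf_get == buf_put unambiguously means "empty".
 */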
+
+/*
+ * gs_buf_put
+ *
+ * Copy data from the given buffer and put it into the circular buffer.
+ * Restrict to the amount of space available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned
+gs_buf_put(struct gs_buf *gb, const char *buf, unsigned count)
+{
+	unsigned len;
+
+	len  = gs_buf_space_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_put;
+	if (count > len) {
+		memcpy(gb->buf_put, buf, len);
+		memcpy(gb->buf_buf, buf+len, count - len);
+		gb->buf_put = gb->buf_buf + count - len;
+	} else {
+		memcpy(gb->buf_put, buf, count);
+		if (count < len)
+			gb->buf_put += count;
+		else /* count == len */
+			gb->buf_put = gb->buf_buf;
+	}
+
+	return count;
+}
+
+/*
+ * gs_buf_get
+ *
+ * Get data from the circular buffer and copy to the given buffer.
+ * Restrict to the amount of data available.
+ *
+ * Return the number of bytes copied.
+ */
+static unsigned
+gs_buf_get(struct gs_buf *gb, char *buf, unsigned count)
+{
+	unsigned len;
+
+	len = gs_buf_data_avail(gb);
+	if (count > len)
+		count = len;
+
+	if (count == 0)
+		return 0;
+
+	len = gb->buf_buf + gb->buf_size - gb->buf_get;
+	if (count > len) {
+		memcpy(buf, gb->buf_get, len);
+		memcpy(buf+len, gb->buf_buf, count - len);
+		gb->buf_get = gb->buf_buf + count - len;
+	} else {
+		memcpy(buf, gb->buf_get, count);
+		if (count < len)
+			gb->buf_get += count;
+		else /* count == len */
+			gb->buf_get = gb->buf_buf;
+	}
+
+	return count;
+}
+
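/*
 * Minimal usage sketch for the gs_buf helpers above (hypothetical
 * caller; in this driver the buffer is only ever touched under
 * port_lock, which the sketch omits):
 */
static void __maybe_unused gs_buf_usage_example(void)
{
	struct gs_buf b;
	char out[4];

	if (gs_buf_alloc(&b, 8))
		return;				/* -ENOMEM */
	gs_buf_put(&b, "hello", 5);		/* copies at most space_avail bytes */
	gs_buf_get(&b, out, 4);			/* copies at most data_avail bytes */
	gs_buf_free(&b);
}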
+/*-------------------------------------------------------------------------*/
+
+/* I/O glue between TTY (upper) and USB function (lower) driver layers */
+
+/*
+ * gs_alloc_req
+ *
+ * Allocate a usb_request and its buffer.  Returns a pointer to the
+ * usb_request or NULL if there is an error.
+ */
+struct usb_request *
+gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t kmalloc_flags)
+{
+	struct usb_request *req;
+
+	req = usb_ep_alloc_request(ep, kmalloc_flags);
+
+	if (req != NULL) {
+		req->length = len;
+		req->buf = kmalloc(len, kmalloc_flags);
+		if (req->buf == NULL) {
+			usb_ep_free_request(ep, req);
+			return NULL;
+		}
+	}
+
+	return req;
+}
+
+/*
+ * gs_free_req
+ *
+ * Free a usb_request and its buffer.
+ */
+void gs_free_req(struct usb_ep *ep, struct usb_request *req)
+{
+	kfree(req->buf);
+	usb_ep_free_request(ep, req);
+}
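/*
 * Hypothetical usage sketch for the request helpers above: allocate a
 * request sized to the endpoint, attach a completion handler, queue it,
 * and release it from the completion path once it is no longer needed.
 * The example_* names are illustrative only.
 */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* consume req->buf / req->actual here, then release */
	gs_free_req(ep, req);
}

static int __maybe_unused example_queue_one(struct usb_ep *ep)
{
	struct usb_request *req;

	req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}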
+
+/*
+ * gs_send_packet
+ *
+ * If there is data to send, a packet is built in the given
+ * buffer and the size is returned.  If there is no data to
+ * send, 0 is returned.
+ *
+ * Called with port_lock held.
+ */
+static unsigned
+gs_send_packet(struct gs_port *port, char *packet, unsigned size)
+{
+	unsigned len;
+
+	len = gs_buf_data_avail(&port->port_write_buf);
+	if (len < size)
+		size = len;
+	if (size != 0)
+		size = gs_buf_get(&port->port_write_buf, packet, size);
+	return size;
+}
+
+/*
+ * gs_start_tx
+ *
+ * This function finds available write requests, calls
+ * gs_send_packet to fill these packets with data, and
+ * continues until either there are no more write requests
+ * available or no more data to send.  This function is
+ * run whenever data arrives or write requests are available.
+ *
+ * Context: caller owns port_lock; port_usb is non-null.
+ */
+static int gs_start_tx(struct gs_port *port)
+/*
+__releases(&port->port_lock)
+__acquires(&port->port_lock)
+*/
+{
+	struct list_head	*pool = &port->write_pool;
+	struct usb_ep		*in = port->port_usb->in;
+	int			status = 0;
+	bool			do_tty_wake = false;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		int			len;
+
+		if (port->write_started >= QUEUE_SIZE)
+			break;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		len = gs_send_packet(port, req->buf, in->maxpacket);
+		if (len == 0) {
+			wake_up_interruptible(&port->drain_wait);
+			break;
+		}
+		do_tty_wake = true;
+
+		req->length = len;
+		list_del(&req->list);
+		req->zero = (gs_buf_data_avail(&port->port_write_buf) == 0);
+
+		pr_vdebug(PREFIX "%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
+				port->port_num, len, *((u8 *)req->buf),
+				*((u8 *)req->buf+1), *((u8 *)req->buf+2));
+
+		/* Drop lock while we call out of driver; completions
+		 * could be issued while we do so.  Disconnection may
+		 * happen too; maybe immediately before we queue this!
+		 *
+		 * NOTE that we may keep sending data for a while after
+		 * the TTY closed (port->port.tty is NULL).
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ep_queue(in, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", in->name, status);
+			list_add(&req->list, pool);
+			break;
+		}
+
+		port->write_started++;
+
+		/* abort immediately after disconnect */
+		if (!port->port_usb)
+			break;
+	}
+
+	if (do_tty_wake && port->port.tty)
+		tty_wakeup(port->port.tty);
+	return status;
+}
+
+/*
+ * Context: caller owns port_lock, and port_usb is set
+ */
+static unsigned gs_start_rx(struct gs_port *port)
+/*
+__releases(&port->port_lock)
+__acquires(&port->port_lock)
+*/
+{
+	struct list_head	*pool = &port->read_pool;
+	struct usb_ep		*out = port->port_usb->out;
+
+	while (!list_empty(pool)) {
+		struct usb_request	*req;
+		int			status;
+		struct tty_struct	*tty;
+
+		/* no more rx if closed */
+		tty = port->port.tty;
+		if (!tty)
+			break;
+
+		if (port->read_started >= QUEUE_SIZE)
+			break;
+
+		req = list_entry(pool->next, struct usb_request, list);
+		list_del(&req->list);
+		req->length = out->maxpacket;
+
+		/* drop lock while we call out; the controller driver
+		 * may need to call us back (e.g. for disconnect)
+		 */
+		spin_unlock(&port->port_lock);
+		status = usb_ep_queue(out, req, GFP_ATOMIC);
+		spin_lock(&port->port_lock);
+
+		if (status) {
+			pr_debug("%s: %s %s err %d\n",
+					__func__, "queue", out->name, status);
+			list_add(&req->list, pool);
+			break;
+		}
+		port->read_started++;
+
+		/* abort immediately after disconnect */
+		if (!port->port_usb)
+			break;
+	}
+	return port->read_started;
+}
+
+/*
+ * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * layer until it refuses to take any more data (or is throttled back).
+ * Then it issues reads for any further data.
+ *
+ * If the RX queue becomes full enough that no usb_request is queued,
+ * the OUT endpoint may begin NAKing as soon as its FIFO fills up.
+ * So QUEUE_SIZE packets plus however many the FIFO holds (usually two)
+ * can be buffered before the TTY layer's buffers (currently 64 KB).
+ */
+static void gs_rx_push(unsigned long _port)
+{
+	struct gs_port		*port = (void *)_port;
+	struct tty_struct	*tty;
+	struct list_head	*queue = &port->read_queue;
+	bool			disconnect = false;
+	bool			do_push = false;
+
+	/* hand any queued data to the tty */
+	spin_lock_irq(&port->port_lock);
+	tty = port->port.tty;
+	while (!list_empty(queue)) {
+		struct usb_request	*req;
+
+		req = list_first_entry(queue, struct usb_request, list);
+
+		/* discard data if tty was closed */
+		if (!tty)
+			goto recycle;
+
+		/* leave data queued if tty was rx throttled */
+		if (test_bit(TTY_THROTTLED, &tty->flags))
+			break;
+
+		switch (req->status) {
+		case -ESHUTDOWN:
+			disconnect = true;
+			pr_vdebug(PREFIX "%d: shutdown\n", port->port_num);
+			break;
+
+		default:
+			/* presumably a transient fault */
+			pr_warning(PREFIX "%d: unexpected RX status %d\n",
+					port->port_num, req->status);
+			/* FALLTHROUGH */
+		case 0:
+			/* normal completion */
+			break;
+		}
+
+		/* push data to (open) tty */
+		if (req->actual) {
+			char		*packet = req->buf;
+			unsigned	size = req->actual;
+			unsigned	n;
+			int		count;
+
+			/* we may have pushed part of this packet already... */
+			n = port->n_read;
+			if (n) {
+				packet += n;
+				size -= n;
+			}
+
+			count = tty_insert_flip_string(tty, packet, size);
+			if (count)
+				do_push = true;
+			if (count != size) {
+				/* stop pushing; TTY layer can't handle more */
+				port->n_read += count;
+				pr_vdebug(PREFIX "%d: rx block %d/%d\n",
+						port->port_num,
+						count, req->actual);
+				break;
+			}
+			port->n_read = 0;
+		}
+recycle:
+		list_move(&req->list, &port->read_pool);
+		port->read_started--;
+	}
+
+	/* Push from tty to ldisc; without low_latency set this is handled by
+	 * a workqueue, so we won't get callbacks and can hold port_lock
+	 */
+	if (tty && do_push)
+		tty_flip_buffer_push(tty);
+
+
+	/* We want our data queue to become empty ASAP, keeping data
+	 * in the tty and ldisc (not here).  If we couldn't push any
+	 * this time around, there may be trouble unless there's an
+	 * implicit tty_unthrottle() call on its way...
+	 *
+	 * REVISIT we should probably add a timer to keep the tasklet
+	 * from starving ... but it's not clear that case ever happens.
+	 */
+	if (!list_empty(queue) && tty) {
+		if (!test_bit(TTY_THROTTLED, &tty->flags)) {
+			if (do_push)
+				tasklet_schedule(&port->push);
+			else
+				pr_warning(PREFIX "%d: RX not scheduled?\n",
+					port->port_num);
+		}
+	}
+
+	/* If we're still connected, refill the USB RX queue. */
+	if (!disconnect && port->port_usb)
+		gs_start_rx(port);
+
+	spin_unlock_irq(&port->port_lock);
+}
+
+static void gs_read_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	/* Queue all received data until the tty layer is ready for it. */
+	spin_lock(&port->port_lock);
+	list_add_tail(&req->list, &port->read_queue);
+	tasklet_schedule(&port->push);
+	spin_unlock(&port->port_lock);
+}
+
+static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct gs_port	*port = ep->driver_data;
+
+	spin_lock(&port->port_lock);
+	list_add(&req->list, &port->write_pool);
+	port->write_started--;
+
+	switch (req->status) {
+	default:
+		/* presumably a transient fault */
+		pr_warning("%s: unexpected %s status %d\n",
+				__func__, ep->name, req->status);
+		/* FALL THROUGH */
+	case 0:
+		/* normal completion */
+		gs_start_tx(port);
+		break;
+
+	case -ESHUTDOWN:
+		/* disconnect */
+		pr_vdebug("%s: %s shutdown\n", __func__, ep->name);
+		break;
+	}
+
+	spin_unlock(&port->port_lock);
+}
+
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
+{
+	struct usb_request	*req;
+
+	while (!list_empty(head)) {
+		req = list_entry(head->next, struct usb_request, list);
+		list_del(&req->list);
+		gs_free_req(ep, req);
+		if (allocated)
+			(*allocated)--;
+	}
+}
+
+static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
+{
+	int			i;
+	struct usb_request	*req;
+	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
+
+	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
+	 * do quite that many this time, don't fail ... we just won't
+	 * be as speedy as we might otherwise be.
+	 */
+	for (i = 0; i < n; i++) {
+		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
+		if (!req)
+			return list_empty(head) ? -ENOMEM : 0;
+		req->complete = fn;
+		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
+	}
+	return 0;
+}
+
+/**
+ * gs_start_io - start USB I/O streams
+ * @port: port whose USB endpoints supply the I/O streams
+ * Context: holding port_lock; port->port.tty and port_usb are non-null
+ *
+ * We only start I/O when something is connected to both sides of
+ * this port.  If nothing is listening on the host side, we may
+ * be pointlessly filling up our TX buffers and FIFO.
+ */
+static int gs_start_io(struct gs_port *port)
+{
+	struct list_head	*head = &port->read_pool;
+	struct usb_ep		*ep = port->port_usb->out;
+	int			status;
+	unsigned		started;
+
+	/* Allocate RX and TX I/O buffers.  We can't easily do this much
+	 * earlier (with GFP_KERNEL) because the requests are coupled to
+	 * endpoints, as are the packet sizes we'll be using.  Different
+	 * configurations may use different endpoints with a given port;
+	 * and high speed vs full speed changes packet sizes too.
+	 */
+	status = gs_alloc_requests(ep, head, gs_read_complete,
+		&port->read_allocated);
+	if (status)
+		return status;
+
+	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
+			gs_write_complete, &port->write_allocated);
+	if (status) {
+		gs_free_requests(ep, head, &port->read_allocated);
+		return status;
+	}
+
+	/* queue read requests */
+	port->n_read = 0;
+	started = gs_start_rx(port);
+
+	/* unblock any pending writes into our circular buffer */
+	if (started) {
+		tty_wakeup(port->port.tty);
+	} else {
+		gs_free_requests(ep, head, &port->read_allocated);
+		gs_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
+		status = -EIO;
+	}
+
+	return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* TTY Driver */
+
+/*
+ * gs_open sets up the link between a gs_port and its associated TTY.
+ * That link is broken *only* by TTY close(), and all driver methods
+ * know that.
+ */
+static int gs_open(struct tty_struct *tty, struct file *file)
+{
+	int		port_num = tty->index;
+	struct gs_port	*port;
+	int		status;
+
+	do {
+		mutex_lock(&ports[port_num].lock);
+		port = ports[port_num].port;
+		if (!port)
+			status = -ENODEV;
+		else {
+			spin_lock_irq(&port->port_lock);
+
+			/* already open?  Great. */
+			if (port->port.count) {
+				status = 0;
+				port->port.count++;
+
+			/* currently opening/closing? wait ... */
+			} else if (port->openclose) {
+				status = -EBUSY;
+
+			/* ... else we do the work */
+			} else {
+				status = -EAGAIN;
+				port->openclose = true;
+			}
+			spin_unlock_irq(&port->port_lock);
+		}
+		mutex_unlock(&ports[port_num].lock);
+
+		switch (status) {
+		default:
+			/* fully handled */
+			return status;
+		case -EAGAIN:
+			/* must do the work */
+			break;
+		case -EBUSY:
+			/* wait for EAGAIN task to finish */
+			msleep(1);
+			/* REVISIT could have a waitchannel here, if
+			 * concurrent open performance is important
+			 */
+			break;
+		}
+	} while (status != -EAGAIN);
+
+	/* Do the "real open" */
+	spin_lock_irq(&port->port_lock);
+
+	/* allocate circular buffer on first open */
+	if (port->port_write_buf.buf_buf == NULL) {
+
+		spin_unlock_irq(&port->port_lock);
+		status = gs_buf_alloc(&port->port_write_buf, WRITE_BUF_SIZE);
+		spin_lock_irq(&port->port_lock);
+
+		if (status) {
+			pr_debug("gs_open: ttyGS%d (%p,%p) no buffer\n",
+				port->port_num, tty, file);
+			port->openclose = false;
+			goto exit_unlock_port;
+		}
+	}
+
+	/* REVISIT if REMOVED (ports[].port NULL), abort the open
+	 * to let rmmod work faster (but this way isn't wrong).
+	 */
+
+	/* REVISIT maybe wait for "carrier detect" */
+
+	tty->driver_data = port;
+	port->port.tty = tty;
+
+	port->port.count = 1;
+	port->openclose = false;
+
+	/* if connected, start the I/O stream */
+	if (port->port_usb) {
+		struct gserial	*gser = port->port_usb;
+
+		pr_debug("gs_open: start ttyGS%d\n", port->port_num);
+		gs_start_io(port);
+
+		if (gser->connect)
+			gser->connect(gser);
+	}
+
+	pr_debug("gs_open: ttyGS%d (%p,%p)\n", port->port_num, tty, file);
+
+	status = 0;
+
+exit_unlock_port:
+	spin_unlock_irq(&port->port_lock);
+	return status;
+}
+
+static int gs_writes_finished(struct gs_port *p)
+{
+	int cond;
+
+	/* return true on disconnect or empty buffer */
+	spin_lock_irq(&p->port_lock);
+	cond = (p->port_usb == NULL) || !gs_buf_data_avail(&p->port_write_buf);
+	spin_unlock_irq(&p->port_lock);
+
+	return cond;
+}
+
+static void gs_close(struct tty_struct *tty, struct file *file)
+{
+	struct gs_port *port = tty->driver_data;
+	struct gserial	*gser;
+
+	spin_lock_irq(&port->port_lock);
+
+	if (port->port.count != 1) {
+		if (port->port.count == 0)
+			WARN_ON(1);
+		else
+			--port->port.count;
+		goto exit;
+	}
+
+	pr_debug("gs_close: ttyGS%d (%p,%p) ...\n", port->port_num, tty, file);
+
+	/* mark port as closing but in use; we can drop port lock
+	 * and sleep if necessary
+	 */
+	port->openclose = true;
+	port->port.count = 0;
+
+	gser = port->port_usb;
+	if (gser && gser->disconnect)
+		gser->disconnect(gser);
+
+	/* wait for circular write buffer to drain, disconnect, or at
+	 * most GS_CLOSE_TIMEOUT seconds; then discard the rest
+	 */
+	if (gs_buf_data_avail(&port->port_write_buf) > 0 && gser) {
+		spin_unlock_irq(&port->port_lock);
+		wait_event_interruptible_timeout(port->drain_wait,
+					gs_writes_finished(port),
+					GS_CLOSE_TIMEOUT * HZ);
+		spin_lock_irq(&port->port_lock);
+		gser = port->port_usb;
+	}
+
+	/* Iff we're disconnected, there can be no I/O in flight so it's
+	 * ok to free the circular buffer; else just scrub it.  And don't
+	 * let the push tasklet fire again until we're re-opened.
+	 */
+	if (gser == NULL)
+		gs_buf_free(&port->port_write_buf);
+	else
+		gs_buf_clear(&port->port_write_buf);
+
+	tty->driver_data = NULL;
+	port->port.tty = NULL;
+
+	port->openclose = false;
+
+	pr_debug("gs_close: ttyGS%d (%p,%p) done!\n",
+			port->port_num, tty, file);
+
+	wake_up_interruptible(&port->port.close_wait);
+exit:
+	spin_unlock_irq(&port->port_lock);
+}
+
+static int gs_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		status;
+
+	pr_vdebug("gs_write: ttyGS%d (%p) writing %d bytes\n",
+			port->port_num, tty, count);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (count)
+		count = gs_buf_put(&port->port_write_buf, buf, count);
+	/* treat count == 0 as flush_chars() */
+	if (port->port_usb)
+		status = gs_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return count;
+}
+
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		status;
+
+	pr_vdebug("gs_put_char: (%d,%p) char=0x%x, called from %pf\n",
+		port->port_num, tty, ch, __builtin_return_address(0));
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	status = gs_buf_put(&port->port_write_buf, &ch, 1);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+}
+
+static void gs_flush_chars(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+
+	pr_vdebug("gs_flush_chars: (%d,%p)\n", port->port_num, tty);
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		gs_start_tx(port);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_write_room(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		room = 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb)
+		room = gs_buf_space_avail(&port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	pr_vdebug("gs_write_room: (%d,%p) room=%d\n",
+		port->port_num, tty, room);
+
+	return room;
+}
+
+static int gs_chars_in_buffer(struct tty_struct *tty)
+{
+	struct gs_port	*port = tty->driver_data;
+	unsigned long	flags;
+	int		chars = 0;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	chars = gs_buf_data_avail(&port->port_write_buf);
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	pr_vdebug("gs_chars_in_buffer: (%d,%p) chars=%d\n",
+		port->port_num, tty, chars);
+
+	return chars;
+}
+
+/* undo side effects of setting TTY_THROTTLED */
+static void gs_unthrottle(struct tty_struct *tty)
+{
+	struct gs_port		*port = tty->driver_data;
+	unsigned long		flags;
+
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port_usb) {
+		/* Kickstart read queue processing.  We don't do xon/xoff,
+		 * rts/cts, or other handshaking with the host, but if the
+		 * read queue backs up enough we'll be NAKing OUT packets.
+		 */
+		tasklet_schedule(&port->push);
+		pr_vdebug(PREFIX "%d: unthrottle\n", port->port_num);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
+
+static int gs_break_ctl(struct tty_struct *tty, int duration)
+{
+	struct gs_port	*port = tty->driver_data;
+	int		status = 0;
+	struct gserial	*gser;
+
+	pr_vdebug("gs_break_ctl: ttyGS%d, send break (%d) \n",
+			port->port_num, duration);
+
+	spin_lock_irq(&port->port_lock);
+	gser = port->port_usb;
+	if (gser && gser->send_break)
+		status = gser->send_break(gser, duration);
+	spin_unlock_irq(&port->port_lock);
+
+	return status;
+}
+
+static const struct tty_operations gs_tty_ops = {
+	.open =			gs_open,
+	.close =		gs_close,
+	.write =		gs_write,
+	.put_char =		gs_put_char,
+	.flush_chars =		gs_flush_chars,
+	.write_room =		gs_write_room,
+	.chars_in_buffer =	gs_chars_in_buffer,
+	.unthrottle =		gs_unthrottle,
+	.break_ctl =		gs_break_ctl,
+};
+
+/*-------------------------------------------------------------------------*/
+
+static struct tty_driver *gs_tty_driver;
+
+static int
+gs_port_alloc(unsigned port_num, struct usb_cdc_line_coding *coding)
+{
+	struct gs_port	*port;
+
+	port = kzalloc(sizeof(struct gs_port), GFP_KERNEL);
+	if (port == NULL)
+		return -ENOMEM;
+
+	tty_port_init(&port->port);
+	spin_lock_init(&port->port_lock);
+	init_waitqueue_head(&port->drain_wait);
+
+	tasklet_init(&port->push, gs_rx_push, (unsigned long) port);
+
+	INIT_LIST_HEAD(&port->read_pool);
+	INIT_LIST_HEAD(&port->read_queue);
+	INIT_LIST_HEAD(&port->write_pool);
+
+	port->port_num = port_num;
+	port->port_line_coding = *coding;
+
+	ports[port_num].port = port;
+
+	return 0;
+}
+
+/**
+ * gserial_setup - initialize TTY driver for one or more ports
+ * @g: gadget to associate with these ports
+ * @count: how many ports to support
+ * Context: may sleep
+ *
+ * The TTY stack needs to know in advance how many devices it should
+ * plan to manage.  Use this call to set up the ports you will be
+ * exporting through USB.  Later, connect them to functions based
+ * on what configuration is activated by the USB host; and disconnect
+ * them as appropriate.
+ *
+ * An example would be a two-configuration device in which both
+ * configurations expose port 0, but through different functions.
+ * One configuration could even expose port 1 while the other
+ * one doesn't.
+ *
+ * Returns negative errno or zero.
+ */
+int gserial_setup(struct usb_gadget *g, unsigned count)
+{
+	unsigned			i;
+	struct usb_cdc_line_coding	coding;
+	int				status;
+
+	if (count == 0 || count > N_PORTS)
+		return -EINVAL;
+
+	gs_tty_driver = alloc_tty_driver(count);
+	if (!gs_tty_driver)
+		return -ENOMEM;
+
+	gs_tty_driver->driver_name = "g_serial";
+	gs_tty_driver->name = PREFIX;
+	/* uses dynamically assigned dev_t values */
+
+	gs_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+	gs_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+	gs_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+	gs_tty_driver->init_termios = tty_std_termios;
+
+	/* 9600-8-N-1 ... matches defaults expected by "usbser.sys" on
+	 * MS-Windows.  Otherwise, most of these flags shouldn't affect
+	 * anything unless we were to actually hook up to a serial line.
+	 */
+	gs_tty_driver->init_termios.c_cflag =
+			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+	gs_tty_driver->init_termios.c_ispeed = 9600;
+	gs_tty_driver->init_termios.c_ospeed = 9600;
+
+	coding.dwDTERate = cpu_to_le32(9600);
+	coding.bCharFormat = USB_CDC_1_STOP_BITS;	/* 1 stop bit */
+	coding.bParityType = USB_CDC_NO_PARITY;
+	coding.bDataBits = 8;
+
+	tty_set_operations(gs_tty_driver, &gs_tty_ops);
+
+	/* make devices be openable */
+	for (i = 0; i < count; i++) {
+		mutex_init(&ports[i].lock);
+		status = gs_port_alloc(i, &coding);
+		if (status) {
+			count = i;
+			goto fail;
+		}
+	}
+	n_ports = count;
+
+	/* export the driver ... */
+	status = tty_register_driver(gs_tty_driver);
+	if (status) {
+		pr_err("%s: cannot register, err %d\n",
+				__func__, status);
+		goto fail;
+	}
+
+	/* ... and sysfs class devices, so mdev/udev make /dev/ttyGS* */
+	for (i = 0; i < count; i++) {
+		struct device	*tty_dev;
+
+		tty_dev = tty_register_device(gs_tty_driver, i, &g->dev);
+		if (IS_ERR(tty_dev))
+			pr_warning("%s: no classdev for port %d, err %ld\n",
+				__func__, i, PTR_ERR(tty_dev));
+	}
+
+	pr_debug("%s: registered %d ttyGS* device%s\n", __func__,
+			count, (count == 1) ? "" : "s");
+
+	return status;
+fail:
+	while (count--)
+		kfree(ports[count].port);
+	put_tty_driver(gs_tty_driver);
+	gs_tty_driver = NULL;
+	return status;
+}
+
+static int gs_closed(struct gs_port *port)
+{
+	int cond;
+
+	spin_lock_irq(&port->port_lock);
+	cond = (port->port.count == 0) && !port->openclose;
+	spin_unlock_irq(&port->port_lock);
+	return cond;
+}
+
+/**
+ * gserial_cleanup - remove TTY-over-USB driver and devices
+ * Context: may sleep
+ *
+ * This is called to free all resources allocated by @gserial_setup().
+ * Accordingly, it may need to wait until some open /dev/ files have
+ * closed.
+ *
+ * The caller must have issued @gserial_disconnect() for any ports
+ * that had previously been connected, so that there is never any
+ * I/O pending when it's called.
+ */
+void gserial_cleanup(void)
+{
+	unsigned	i;
+	struct gs_port	*port;
+
+	if (!gs_tty_driver)
+		return;
+
+	/* start sysfs and /dev/ttyGS* node removal */
+	for (i = 0; i < n_ports; i++)
+		tty_unregister_device(gs_tty_driver, i);
+
+	for (i = 0; i < n_ports; i++) {
+		/* prevent new opens */
+		mutex_lock(&ports[i].lock);
+		port = ports[i].port;
+		ports[i].port = NULL;
+		mutex_unlock(&ports[i].lock);
+
+		tasklet_kill(&port->push);
+
+		/* wait for old opens to finish */
+		wait_event(port->port.close_wait, gs_closed(port));
+
+		WARN_ON(port->port_usb != NULL);
+
+		kfree(port);
+	}
+	n_ports = 0;
+
+	tty_unregister_driver(gs_tty_driver);
+	put_tty_driver(gs_tty_driver);
+	gs_tty_driver = NULL;
+
+	pr_debug("%s: cleaned up ttyGS* support\n", __func__);
+}
+
+/**
+ * gserial_connect - notify TTY I/O glue that USB link is active
+ * @gser: the function, set up with endpoints and descriptors
+ * @port_num: which port is active
+ * Context: any (usually from irq)
+ *
+ * This is called to activate endpoints and let the TTY layer know that
+ * the connection is active ... not unlike "carrier detect".  It won't
+ * necessarily start I/O queues; unless the TTY is held open by any
+ * task, there would be no point.  However, the endpoints will be
+ * activated so the USB host can perform I/O, subject to basic USB
+ * hardware flow control.
+ *
+ * Caller needs to have set up the endpoints and USB function in @gser
+ * before calling this, as well as the appropriate (speed-specific)
+ * endpoint descriptors, and also have set up the TTY driver by calling
+ * @gserial_setup().
+ *
+ * Returns negative errno or zero.
+ * On success, ep->driver_data will be overwritten.
+ */
+int gserial_connect(struct gserial *gser, u8 port_num)
+{
+	struct gs_port	*port;
+	unsigned long	flags;
+	int		status;
+
+	if (!gs_tty_driver || port_num >= n_ports)
+		return -ENXIO;
+
+	/* we "know" gserial_cleanup() hasn't been called */
+	port = ports[port_num].port;
+
+	/* activate the endpoints */
+	status = usb_ep_enable(gser->in);
+	if (status < 0)
+		return status;
+	gser->in->driver_data = port;
+
+	status = usb_ep_enable(gser->out);
+	if (status < 0)
+		goto fail_out;
+	gser->out->driver_data = port;
+
+	/* then tell the tty glue that I/O can work */
+	spin_lock_irqsave(&port->port_lock, flags);
+	gser->ioport = port;
+	port->port_usb = gser;
+
+	/* REVISIT unclear how best to handle this state...
+	 * we don't really couple it with the Linux TTY.
+	 */
+	gser->port_line_coding = port->port_line_coding;
+
+	/* REVISIT if waiting on "carrier detect", signal. */
+
+	/* if it's already open, start I/O ... and notify the serial
+	 * protocol about open/close status (connect/disconnect).
+	 */
+	if (port->port.count) {
+		pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
+		gs_start_io(port);
+		if (gser->connect)
+			gser->connect(gser);
+	} else {
+		if (gser->disconnect)
+			gser->disconnect(gser);
+	}
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	return status;
+
+fail_out:
+	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
+	return status;
+}
+
+/**
+ * gserial_disconnect - notify TTY I/O glue that USB link is inactive
+ * @gser: the function, on which gserial_connect() was called
+ * Context: any (usually from irq)
+ *
+ * This is called to deactivate endpoints and let the TTY layer know
+ * that the connection went inactive ... not unlike "hangup".
+ *
+ * On return, the state is as if gserial_connect() had never been called;
+ * there is no active USB I/O on these endpoints.
+ */
+void gserial_disconnect(struct gserial *gser)
+{
+	struct gs_port	*port = gser->ioport;
+	unsigned long	flags;
+
+	if (!port)
+		return;
+
+	/* tell the TTY glue not to do I/O here any more */
+	spin_lock_irqsave(&port->port_lock, flags);
+
+	/* REVISIT as above: how best to track this? */
+	port->port_line_coding = gser->port_line_coding;
+
+	port->port_usb = NULL;
+	gser->ioport = NULL;
+	if (port->port.count > 0 || port->openclose) {
+		wake_up_interruptible(&port->drain_wait);
+		if (port->port.tty)
+			tty_hangup(port->port.tty);
+	}
+	spin_unlock_irqrestore(&port->port_lock, flags);
+
+	/* disable endpoints, aborting down any active I/O */
+	usb_ep_disable(gser->out);
+	gser->out->driver_data = NULL;
+
+	usb_ep_disable(gser->in);
+	gser->in->driver_data = NULL;
+
+	/* finally, free any unused/unusable I/O buffers */
+	spin_lock_irqsave(&port->port_lock, flags);
+	if (port->port.count == 0 && !port->openclose)
+		gs_buf_free(&port->port_write_buf);
+	gs_free_requests(gser->out, &port->read_pool, NULL);
+	gs_free_requests(gser->out, &port->read_queue, NULL);
+	gs_free_requests(gser->in, &port->write_pool, NULL);
+
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
+
+	spin_unlock_irqrestore(&port->port_lock, flags);
+}
diff --git a/drivers/staging/ccg/u_serial.h b/drivers/staging/ccg/u_serial.h
new file mode 100644
index 0000000..9b0fe64
--- /dev/null
+++ b/drivers/staging/ccg/u_serial.h
@@ -0,0 +1,65 @@
+/*
+ * u_serial.h - interface to USB gadget "serial port"/TTY utilities
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * either version 2 of that License or (at your option) any later version.
+ */
+
+#ifndef __U_SERIAL_H
+#define __U_SERIAL_H
+
+#include <linux/usb/composite.h>
+#include <linux/usb/cdc.h>
+
+/*
+ * One non-multiplexed "serial" I/O port ... there can be several of these
+ * on any given USB peripheral device, if it provides enough endpoints.
+ *
+ * The "u_serial" utility component exists to do one thing:  manage TTY
+ * style I/O using the USB peripheral endpoints listed here, including
+ * hookups to sysfs and /dev for each logical "tty" device.
+ *
+ * REVISIT at least ACM could support tiocmget() if needed.
+ *
+ * REVISIT someday, allow multiplexing several TTYs over these endpoints.
+ */
+struct gserial {
+	struct usb_function		func;
+
+	/* port is managed by gserial_{connect,disconnect} */
+	struct gs_port			*ioport;
+
+	struct usb_ep			*in;
+	struct usb_ep			*out;
+
+	/* REVISIT avoid this CDC-ACM support harder ... */
+	struct usb_cdc_line_coding port_line_coding;	/* 9600-8-N-1 etc */
+
+	/* notification callbacks */
+	void (*connect)(struct gserial *p);
+	void (*disconnect)(struct gserial *p);
+	int (*send_break)(struct gserial *p, int duration);
+};
+
+/* utilities to allocate/free request and buffer */
+struct usb_request *gs_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags);
+void gs_free_req(struct usb_ep *, struct usb_request *req);
+
+/* port setup/teardown is handled by gadget driver */
+int gserial_setup(struct usb_gadget *g, unsigned n_ports);
+void gserial_cleanup(void);
+
+/* connect/disconnect is handled by individual functions */
+int gserial_connect(struct gserial *, u8 port_num);
+void gserial_disconnect(struct gserial *);
+
+/* functions are bound to configurations by a config or gadget driver */
+int acm_bind_config(struct usb_configuration *c, u8 port_num);
+int gser_bind_config(struct usb_configuration *c, u8 port_num);
+int obex_bind_config(struct usb_configuration *c, u8 port_num);
+
+#endif /* __U_SERIAL_H */
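/*
 * Condensed lifecycle sketch for the API above (hypothetical function
 * driver; the my_* names are illustrative, and error handling plus
 * endpoint/descriptor setup are omitted).  The TTY pool is sized once
 * at bind time; each port is then connected or disconnected as the
 * host activates or drops the corresponding configuration/altsetting.
 */
static struct gserial my_gser;		/* normally embedded in a function */

static int my_composite_bind(struct usb_composite_dev *cdev)
{
	return gserial_setup(cdev->gadget, 1);	/* creates /dev/ttyGS0 */
}

static int my_func_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
	/* my_gser.in/.out must already be set up with matching descriptors */
	return gserial_connect(&my_gser, 0);
}

static void my_func_disable(struct usb_function *f)
{
	gserial_disconnect(&my_gser);
}

static int my_composite_unbind(struct usb_composite_dev *cdev)
{
	gserial_cleanup();
	return 0;
}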
diff --git a/drivers/staging/ccg/usbstring.c b/drivers/staging/ccg/usbstring.c
new file mode 100644
index 0000000..4d25b90
--- /dev/null
+++ b/drivers/staging/ccg/usbstring.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2003 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/nls.h>
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+
+
+/**
+ * usb_gadget_get_string - fill out a string descriptor 
+ * @table: of c strings encoded using UTF-8
+ * @id: string id, from low byte of wValue in get string descriptor
+ * @buf: at least 256 bytes, must be 16-bit aligned
+ *
+ * Finds the UTF-8 string matching the ID, and converts it into a
+ * string descriptor in utf16-le.
+ * Returns length of descriptor (always even) or negative errno
+ *
+ * If your driver needs strings in multiple languages, you'll probably
+ * "switch (wIndex) { ... }"  in your ep0 string descriptor logic,
+ * using this routine after choosing which set of UTF-8 strings to use.
+ * Note that US-ASCII is a strict subset of UTF-8; any string bytes with
+ * the eighth bit set will be multibyte UTF-8 characters, not ISO-8859/1
+ * characters (which are also widely used in C strings).
+ */
+int
+usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
+{
+	struct usb_string	*s;
+	int			len;
+
+	/* descriptor 0 has the language id */
+	if (id == 0) {
+		buf [0] = 4;
+		buf [1] = USB_DT_STRING;
+		buf [2] = (u8) table->language;
+		buf [3] = (u8) (table->language >> 8);
+		return 4;
+	}
+	for (s = table->strings; s && s->s; s++)
+		if (s->id == id)
+			break;
+
+	/* unrecognized: stall. */
+	if (!s || !s->s)
+		return -EINVAL;
+
+	/* string descriptors have length, tag, then UTF16-LE text */
+	len = min ((size_t) 126, strlen (s->s));
+	len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
+			(wchar_t *) &buf[2], 126);
+	if (len < 0)
+		return -EINVAL;
+	buf [0] = (len + 1) * 2;
+	buf [1] = USB_DT_STRING;
+	return buf [0];
+}
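/*
 * Hypothetical usage sketch: a gadget keeps its strings as a UTF-8
 * table and converts the one the host asked for on demand.  The
 * example_* identifiers are illustrative only; buf must be the
 * (at least 256 byte) ep0 buffer mentioned above.
 */
static struct usb_string example_strings[] = {
	{ .id = 1, .s = "Example Manufacturer" },
	{ .id = 2, .s = "Example Product" },
	{ }					/* end of table */
};

static struct usb_gadget_strings example_stringtab = {
	.language	= 0x0409,		/* en-US */
	.strings	= example_strings,
};

static int __maybe_unused example_fill_string_descriptor(int id, u8 *buf)
{
	/* wIndex selected the language; wValue low byte selected the id */
	return usb_gadget_get_string(&example_stringtab, id, buf);
}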
+
-- 
1.7.12.rc3



