Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c | 122
1 file changed, 112 insertions(+), 10 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7a5545..d64f5724bfc4 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include <linux/pci.h>
+#include <linux/slab.h>
#include <linux/dmapool.h>
#include "xhci.h"
@@ -198,6 +199,31 @@ fail:
return 0;
}
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index)
+{
+ int rings_cached;
+
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+ /* Store in the first free slot, then bump the count. */
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[ep_index].ring;
+ virt_dev->num_rings_cached++;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+ virt_dev->num_rings_cached,
+ (virt_dev->num_rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+ "freeing ring\n",
+ virt_dev->num_rings_cached);
+ }
+ virt_dev->eps[ep_index].ring = NULL;
+}
+
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
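A minimal standalone sketch of the bounded-cache pattern that xhci_free_or_cache_endpoint_ring() above implements (plain C, outside the patch; the cache bound and names here are illustrative only):

    #include <stdio.h>

    #define MAX_RINGS_CACHED 31              /* illustrative bound */

    struct ring { int dummy; };

    static struct ring *ring_cache[MAX_RINGS_CACHED];
    static int num_rings_cached;

    /* Cache the ring if a slot is free, otherwise free it. */
    static void free_or_cache_ring(struct ring *ring)
    {
        if (num_rings_cached < MAX_RINGS_CACHED) {
            /* Store in the first free slot, then bump the count. */
            ring_cache[num_rings_cached++] = ring;
            printf("cached, %d ring(s) cached\n", num_rings_cached);
        } else {
            /* The real driver would call xhci_ring_free() here. */
            printf("cache full (%d rings), freeing ring\n",
                   num_rings_cached);
        }
    }

    int main(void)
    {
        struct ring r1, r2;

        free_or_cache_ring(&r1);
        free_or_cache_ring(&r2);
        return 0;
    }

The cache fills slots 0 through MAX_RINGS_CACHED - 1 and never indexes past the array, matching the store-before-increment ordering above.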
@@ -242,6 +268,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ if (!ctx)
+ return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
@@ -427,7 +455,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -471,7 +499,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -539,8 +567,13 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
if (interval < 3)
interval = 3;
if ((1 << interval) != 8*ep->desc.bInterval)
- dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
- ep->desc.bEndpointAddress, 1 << interval);
+ dev_warn(&udev->dev,
+ "ep %#x - rounding interval"
+ " to %d microframes, "
+ "ep desc says %d microframes\n",
+ ep->desc.bEndpointAddress,
+ 1 << interval,
+ 8*ep->desc.bInterval);
}
break;
default:
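A worked example of the rounding that the expanded warning above reports, assuming the driver picks the interval exponent by rounding the descriptor's interval down to a power of two of microframes (fls-style, with the clamp to at least 3 shown in the context above); standalone C, outside the patch:

    #include <stdio.h>

    /* Highest set bit position, 1-based (like the kernel's fls()). */
    static unsigned int my_fls(unsigned int x)
    {
        unsigned int r = 0;

        while (x) {
            x >>= 1;
            r++;
        }
        return r;
    }

    int main(void)
    {
        unsigned int bInterval = 5;                      /* frames, from the ep descriptor */
        unsigned int desc_uframes = 8 * bInterval;       /* 40 microframes */
        unsigned int interval = my_fls(desc_uframes) - 1;/* exponent: 5 */

        if ((1u << interval) != desc_uframes)
            printf("rounding interval to %u microframes, "
                   "ep desc says %u microframes\n",
                   1u << interval, desc_uframes);        /* prints 32 vs 40 */
        return 0;
    }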
@@ -549,6 +582,19 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
return EP_INTERVAL(interval);
}
+/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ if (udev->speed != USB_SPEED_SUPER || !ep->ss_ep_comp)
+ return 0;
+ return ep->ss_ep_comp->desc.bmAttributes;
+}
+
static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
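On xhci_get_endpoint_mult() above: for a SuperSpeed isochronous endpoint the Mult value sits in the low two bits of the companion descriptor's bmAttributes, and the Mult field in the endpoint context is only two bits wide. A standalone decode with an illustrative value, outside the patch:

    #include <stdio.h>

    /* USB 3.0: for isochronous endpoints, bits 1:0 of the SuperSpeed
     * endpoint companion descriptor's bmAttributes carry Mult.
     */
    #define SS_ISOC_EP_COMP_MULT(bmAttributes)  ((bmAttributes) & 0x3)

    int main(void)
    {
        unsigned char bmAttributes = 0x02;      /* illustrative value */
        unsigned int mult = SS_ISOC_EP_COMP_MULT(bmAttributes);

        /* Mult is zero-based: 2 means up to 3 bursts per service interval. */
        printf("Mult = %u, up to %u burst(s) per interval\n",
               mult, mult + 1);
        return 0;
    }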
@@ -579,6 +625,36 @@ static inline u32 xhci_get_endpoint_type(struct usb_device *udev,
return type;
}
+/* Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
+ struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int max_burst;
+ int max_packet;
+
+ /* Only applies for interrupt or isochronous endpoints */
+ if (usb_endpoint_xfer_control(&ep->desc) ||
+ usb_endpoint_xfer_bulk(&ep->desc))
+ return 0;
+
+ if (udev->speed == USB_SPEED_SUPER) {
+ if (ep->ss_ep_comp)
+ return ep->ss_ep_comp->desc.wBytesPerInterval;
+ xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n");
+ /* Assume no bursts, no multiple opportunities to send. */
+ return ep->desc.wMaxPacketSize;
+ }
+
+ max_packet = ep->desc.wMaxPacketSize & 0x7ff;
+ max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
+ /* A 0 in max burst means 1 transfer per ESIT */
+ return max_packet * (max_burst + 1);
+}
+
int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct usb_device *udev,
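A worked example for xhci_get_max_esit_payload() above: in a high-speed periodic endpoint descriptor, wMaxPacketSize carries the packet size in bits 10:0 and the additional transactions per microframe in bits 12:11, so the ESIT payload is max_packet * (max_burst + 1). Standalone C with an illustrative descriptor value, outside the patch:

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative high-speed isoc endpoint: wMaxPacketSize = 0x1400
         * -> max packet 1024 bytes (bits 10:0), 2 additional transactions
         *    per microframe (bits 12:11).
         */
        unsigned short wMaxPacketSize = 0x1400;
        unsigned int max_packet = wMaxPacketSize & 0x7ff;          /* 1024 */
        unsigned int max_burst = (wMaxPacketSize & 0x1800) >> 11;  /* 2 */

        /* A 0 in max_burst would mean one transfer per ESIT. */
        printf("max ESIT payload = %u bytes\n",
               max_packet * (max_burst + 1));                      /* 3072 */
        return 0;
    }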
@@ -590,6 +666,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
unsigned int max_packet;
unsigned int max_burst;
+ u32 max_esit_payload;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
@@ -611,6 +688,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
+ ep_ctx->ep_info |= EP_MULT(xhci_get_endpoint_mult(udev, ep));
/* FIXME dig Mult and streams info out of ep companion desc */
@@ -656,6 +734,26 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
default:
BUG();
}
+ max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
+ ep_ctx->tx_info = MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload);
+
+ /*
+ * XXX no idea how to calculate the average TRB buffer length for bulk
+ * endpoints, as the driver gives us no clue how big each scatter gather
+ * list entry (or buffer) is going to be.
+ *
+ * For isochronous and interrupt endpoints, we set it to the max
+ * available, until we have new API in the USB core to allow drivers to
+ * declare how much bandwidth they actually need.
+ *
+ * Normally, it would be calculated by taking the total of the buffer
+ * lengths in the TD and then dividing by the number of TRBs in a TD,
+ * including link TRBs, No-op TRBs, and Event data TRBs. Since we don't
+ * use Event Data TRBs, and we don't chain in a link TRB on short
+ * transfers, we're basically dividing by 1.
+ */
+ ep_ctx->tx_info |= AVG_TRB_LENGTH_FOR_EP(max_esit_payload);
+
/* FIXME Debug endpoint context */
return 0;
}
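On the tx_info setup above: per the xHCI endpoint-context layout, this dword holds the Average TRB Length in its low 16 bits and the Max ESIT Payload in its high 16 bits, which is why the two macro values can be OR'd into the same field. A standalone sketch of that packing under the assumed bit layout (the driver's real macros live in xhci.h); outside the patch:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed tx_info layout:
     * bits 15:0  = Average TRB Length
     * bits 31:16 = Max ESIT Payload
     */
    #define AVG_TRB_LENGTH(p)    ((uint32_t)(p) & 0xffff)
    #define MAX_ESIT_PAYLOAD(p)  (((uint32_t)(p) & 0xffff) << 16)

    int main(void)
    {
        uint32_t max_esit_payload = 3072;  /* e.g. the HS isoc example above */
        uint32_t tx_info;

        tx_info = MAX_ESIT_PAYLOAD(max_esit_payload);
        tx_info |= AVG_TRB_LENGTH(max_esit_payload);

        printf("tx_info = 0x%08x\n", tx_info);  /* 0x0c000c00 */
        return 0;
    }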
@@ -819,7 +917,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags)
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags)
{
struct xhci_command *command;
@@ -827,11 +926,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
if (!command)
return NULL;
- command->in_ctx =
- xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
- if (!command->in_ctx) {
- kfree(command);
- return NULL;
+ if (allocate_in_ctx) {
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command);
+ return NULL;
+ }
}
if (allocate_completion) {