Diffstat (limited to 'drivers/usb/host/xhci-mem.c')
-rw-r--r--  drivers/usb/host/xhci-mem.c  127
1 file changed, 78 insertions(+), 49 deletions(-)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index fd9e03afd91c..4e51343ddffc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -391,49 +391,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep;
-
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
- return NULL;
- }
-
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
-}
-
-/* Get the right ring for the given URB.
- * If the endpoint supports streams, boundary check the URB's stream ID.
- * If the endpoint doesn't support streams, return the singular endpoint ring.
- */
-struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
- struct urb *urb)
-{
- return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
- xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
-}
-
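The two helpers removed above form the ring-lookup path used when queuing a transfer: return the single endpoint ring in the common case, otherwise bounds-check the URB's stream ID against the allocated stream rings. A minimal sketch of how a submission path might consume that lookup (the caller name and error handling are illustrative assumptions, not taken from this patch):

/* Hypothetical caller: resolve the transfer ring before building TDs.
 * xhci_urb_to_transfer_ring() is the helper shown above; everything else
 * in this sketch is an assumption for illustration.
 */
static int example_prepare_transfer(struct xhci_hcd *xhci, struct urb *urb)
{
	struct xhci_ring *ring;

	ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ring)
		return -EINVAL;	/* bad stream ID or missing stream ring */

	/* ... enqueue TRBs for this URB onto "ring" ... */
	return 0;
}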
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static int xhci_test_radix_tree(struct xhci_hcd *xhci,
unsigned int num_streams,
@@ -835,6 +792,27 @@ fail:
return 0;
}
+void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
+ struct usb_device *udev)
+{
+ struct xhci_virt_device *virt_dev;
+ struct xhci_ep_ctx *ep0_ctx;
+ struct xhci_ring *ep_ring;
+
+ virt_dev = xhci->devs[udev->slot_id];
+ ep0_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, 0);
+ ep_ring = virt_dev->eps[0].ring;
+ /*
+ * FIXME we don't keep track of the dequeue pointer very well after a
+ * Set TR dequeue pointer, so we're setting the dequeue pointer of the
+ * host to our enqueue pointer. This should only be called after a
+ * configured device has reset, so all control transfers should have
+ * been completed or cancelled before the reset.
+ */
+ ep0_ctx->deq = xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
+ ep0_ctx->deq |= ep_ring->cycle_state;
+}
+
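The helper added above is intended for the reset-device path: after a configured device has been reset, the input context's EP0 dequeue pointer is seeded from the driver's current enqueue pointer, with the cycle state OR'd into bit 0 (the TR Dequeue Pointer field carries the dequeue cycle state in its low bit). A rough sketch of where such a call could sit, with the surrounding steps hedged as assumptions:

/* Illustrative only: a simplified assumption of how
 * xhci_copy_ep0_dequeue_into_input_ctx() might be used.  The real
 * reset-device handling lives elsewhere and involves more steps
 * (stopping endpoints, issuing a Reset Device command, and so on).
 */
static int example_after_device_reset(struct xhci_hcd *xhci,
		struct usb_device *udev)
{
	/* Point the input context's EP0 dequeue at the current enqueue,
	 * since all control transfers were completed or cancelled before
	 * the reset.
	 */
	xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);

	/* ... then issue the command that makes the controller pick up the
	 * updated endpoint 0 context ...
	 */
	return 0;
}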
/* Setup an xHCI virtual device for a Set Address command */
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev)
{
@@ -1002,7 +980,7 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
return EP_INTERVAL(interval);
}
-/* The "Mult" field in the endpoint context is only set for SuperSpeed devices.
+/* The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
* High speed endpoint descriptors can define "the number of additional
* transaction opportunities per microframe", but that goes in the Max Burst
* endpoint context field.
@@ -1010,7 +988,8 @@ static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
static inline u32 xhci_get_endpoint_mult(struct usb_device *udev,
struct usb_host_endpoint *ep)
{
- if (udev->speed != USB_SPEED_SUPER)
+ if (udev->speed != USB_SPEED_SUPER ||
+ !usb_endpoint_xfer_isoc(&ep->desc))
return 0;
return ep->ss_ep_comp.bmAttributes;
}
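For SuperSpeed isochronous endpoints, bmAttributes in the SuperSpeed endpoint companion descriptor carries the Mult value, and the payload an endpoint can move per service interval scales with both Max Burst and Mult. A hedged sketch of that relationship (the helper name is invented for illustration; the (bMaxBurst + 1) * (Mult + 1) product follows the USB 3.0 companion-descriptor definitions):

/* Illustrative helper (not from this patch): estimate how many packets a
 * SuperSpeed isoc endpoint may move per service interval, based on the
 * SuperSpeed endpoint companion descriptor.  Non-isoc and non-SuperSpeed
 * endpoints get a Mult of 0, matching xhci_get_endpoint_mult() above.
 */
static unsigned int example_ss_isoc_packets_per_interval(
		const struct usb_ss_ep_comp_descriptor *comp)
{
	unsigned int mult = comp->bmAttributes & 0x3;	/* Mult field */
	unsigned int burst = comp->bMaxBurst;

	return (burst + 1) * (mult + 1);
}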
@@ -1090,8 +1069,18 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */
- virt_dev->eps[ep_index].new_ring =
- xhci_ring_alloc(xhci, 1, true, mem_flags);
+ /*
+ * Isochronous endpoint ring needs bigger size because one isoc URB
+ * carries multiple packets and it will insert multiple tds to the
+ * ring.
+ * This should be replaced with dynamic ring resizing in the future.
+ */
+ if (usb_endpoint_xfer_isoc(&ep->desc))
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 8, true, mem_flags);
+ else
+ virt_dev->eps[ep_index].new_ring =
+ xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->eps[ep_index].new_ring) {
/* Attempt to use the ring cache */
if (virt_dev->num_rings_cached == 0)
@@ -1102,6 +1091,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
virt_dev->num_rings_cached--;
xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
}
+ virt_dev->eps[ep_index].skip = false;
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
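The isochronous ring sizing above (8 segments instead of 1) exists because a single isoc URB typically describes many packets, each of which becomes its own TD on the ring. As a rough capacity check, assuming the driver's usual 64 TRBs per segment with one link TRB per segment (an assumption about TRBS_PER_SEGMENT, not something stated in this hunk):

/* Back-of-the-envelope capacity for the ring sizes chosen above.
 * TRBS_PER_SEGMENT is assumed to be 64 here (its usual value in xhci.h);
 * one TRB per segment is a link TRB and cannot hold a transfer.
 */
#define EXAMPLE_TRBS_PER_SEGMENT	64

static unsigned int example_usable_trbs(unsigned int num_segs)
{
	return num_segs * (EXAMPLE_TRBS_PER_SEGMENT - 1);
}

/* example_usable_trbs(1) == 63  -> enough for typical bulk/interrupt queues
 * example_usable_trbs(8) == 504 -> headroom for isoc URBs carrying many packets
 */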
@@ -1367,6 +1357,22 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
return command;
}
+void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv)
+{
+ int last;
+
+ if (!urb_priv)
+ return;
+
+ last = urb_priv->length - 1;
+ if (last >= 0) {
+ int i;
+ for (i = 0; i <= last; i++)
+ kfree(urb_priv->td[i]);
+ }
+ kfree(urb_priv);
+}
+
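xhci_urb_free_priv() tears down a per-URB private structure holding one TD pointer per packet; the allocation side is not part of this hunk, but a hedged sketch of what a matching setup could look like (the urb_priv layout with a trailing td[] array and the allocation flow are assumptions based on the free path above):

/* Illustrative counterpart to xhci_urb_free_priv(): allocate an urb_priv
 * with one TD slot per packet.  This is a sketch of the expected shape,
 * not the submission code from this patch.
 */
static struct urb_priv *example_alloc_urb_priv(int num_tds, gfp_t mem_flags)
{
	struct urb_priv *urb_priv;
	int i;

	urb_priv = kzalloc(sizeof(*urb_priv) +
			num_tds * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return NULL;

	for (i = 0; i < num_tds; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			/* free whatever was allocated so far; the xhci
			 * argument is unused by the free path above */
			urb_priv->length = i;
			xhci_urb_free_priv(NULL, urb_priv);
			return NULL;
		}
	}
	urb_priv->length = num_tds;
	return urb_priv;
}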
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
@@ -1566,7 +1572,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
unsigned int num_tests;
int i, ret;
- num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
+ num_tests = ARRAY_SIZE(simple_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
xhci->event_ring->first_seg,
@@ -1579,7 +1585,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return ret;
}
- num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
+ num_tests = ARRAY_SIZE(complex_test_vector);
for (i = 0; i < num_tests; i++) {
ret = xhci_test_trb_in_td(xhci,
complex_test_vector[i].input_seg,
@@ -1595,6 +1601,29 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
return 0;
}
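The two hunks above replace open-coded element counts with ARRAY_SIZE(), which in the kernel is essentially the same sizeof division plus a compile-time check that the argument really is an array (passing a pointer breaks the build). In rough form:

/* Approximate shape of the kernel's ARRAY_SIZE() from include/linux/kernel.h;
 * the real macro also adds a __must_be_array() term that is 0 for arrays and
 * a build error for pointers.
 */
#define EXAMPLE_ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const int example_vector[] = { 1, 2, 3, 5, 8 };
/* EXAMPLE_ARRAY_SIZE(example_vector) == 5 */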
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+{
+ u64 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+ xhci_dbg(xhci, "// Write event ring dequeue pointer, "
+ "preserving EHB bit\n");
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ &xhci->ir_set->erst_dequeue);
+}
+
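xhci_set_hc_event_deq() rewrites the interrupter's Event Ring Dequeue Pointer (ERDP) register: the low bits hold the segment index and the EHB (Event Handler Busy) bit, which is write-1-to-clear, so the code masks those bits out of the new pointer and carries them over from the old value with EHB forced to 0, leaving EHB set if more events remain. Assuming the usual layout behind ERST_PTR_MASK and ERST_EHB (low four bits, EHB at bit 3), the composition looks like:

/* Illustrative recomposition of the ERDP write above.  The mask values are
 * assumptions about ERST_PTR_MASK (0xf) and ERST_EHB (1 << 3) and are not
 * spelled out in this hunk.
 */
#define EXAMPLE_ERST_PTR_MASK	0xfULL
#define EXAMPLE_ERST_EHB	(1ULL << 3)

static u64 example_compose_erdp(u64 old_erdp, u64 new_deq_dma)
{
	u64 low = old_erdp & EXAMPLE_ERST_PTR_MASK;	/* keep low bits */

	low &= ~EXAMPLE_ERST_EHB;	/* write 0 to EHB: RW1C, so it stays set */
	return (new_deq_dma & ~EXAMPLE_ERST_PTR_MASK) | low;
}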
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{