Diffstat (limited to 'drivers/usb/host/xhci-ring.c')
-rw-r--r--  drivers/usb/host/xhci-ring.c  365
1 file changed, 168 insertions, 197 deletions
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9e90d2952760..b7517c3c8059 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -250,9 +250,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		 * AMD 0.96 host, carry over the chain bit of the previous TRB
		 * (which may mean the chain bit is cleared).
		 */
-		if (!(ring->type == TYPE_ISOC &&
-		      (xhci->quirks & XHCI_AMD_0x96_HOST)) &&
-		    !xhci_link_trb_quirk(xhci)) {
+		if (!xhci_link_chain_quirk(xhci, ring->type)) {
			next->link.control &= cpu_to_le32(~TRB_CHAIN);
			next->link.control |= cpu_to_le32(chain);
		}
@@ -283,7 +281,7 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 * Only for transfer and command rings where driver is the producer, not for
 * event rings.
 */
-static unsigned int xhci_num_trbs_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+static unsigned int xhci_num_trbs_free(struct xhci_ring *ring)
 {
	struct xhci_segment *enq_seg = ring->enq_seg;
	union xhci_trb *enq = ring->enqueue;
@@ -654,25 +652,6 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
			  stream_id);
		return -ENODEV;
	}
-	/*
-	 * A cancelled TD can complete with a stall if HW cached the trb.
-	 * In this case driver can't find td, but if the ring is empty we
-	 * can move the dequeue pointer to the current enqueue position.
-	 * We shouldn't hit this anymore as cached cancelled TRBs are given back
-	 * after clearing the cache, but be on the safe side and keep it anyway
-	 */
-	if (!td) {
-		if (list_empty(&ep_ring->td_list)) {
-			new_seg = ep_ring->enq_seg;
-			new_deq = ep_ring->enqueue;
-			new_cycle = ep_ring->cycle_state;
-			xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
-			goto deq_found;
-		} else {
-			xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
-			return -EINVAL;
-		}
-	}

	hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
	new_seg = ep_ring->deq_seg;
@@ -709,8 +688,6 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,

	} while (!cycle_found || !td_last_trb_found);

-deq_found:
-
	/* Don't update the ring cycle state for the producer (us). */
	addr = xhci_trb_virt_to_dma(new_seg, new_deq);
	if (addr == 0) {
@@ -738,7 +715,7 @@ deq_found:
			       lower_32_bits(addr) | trb_sct | new_cycle,
			       upper_32_bits(addr),
			       STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
-			       EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
+			       EP_INDEX_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
	if (ret < 0) {
		xhci_free_command(xhci, cmd);
		return ret;
@@ -783,10 +760,6 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
	}
 }

-/*
- * Must be called with xhci->lock held in interrupt context,
- * releases and re-acquires xhci->lock
- */
 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
				     struct xhci_td *cur_td, int status)
 {
@@ -1031,13 +1004,27 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
			break;
		case TD_DIRTY: /* TD is cached, clear it */
		case TD_HALTED:
+		case TD_CLEARING_CACHE_DEFERRED:
+			if (cached_td) {
+				if (cached_td->urb->stream_id != td->urb->stream_id) {
+					/* Multiple streams case, defer move dq */
+					xhci_dbg(xhci,
+						 "Move dq deferred: stream %u URB %p\n",
+						 td->urb->stream_id, td->urb);
+					td->cancel_status = TD_CLEARING_CACHE_DEFERRED;
+					break;
+				}
+
+				/* Should never happen, but clear the TD if it does */
+				xhci_warn(xhci,
+					  "Found multiple active URBs %p and %p in stream %u?\n",
+					  td->urb, cached_td->urb,
+					  td->urb->stream_id);
+				td_to_noop(xhci, ring, cached_td, false);
+				cached_td->cancel_status = TD_CLEARED;
+			}
+
			td->cancel_status = TD_CLEARING_CACHE;
-			if (cached_td)
-				/* FIXME stream case, several stopped rings */
-				xhci_dbg(xhci,
-					 "Move dq past stream %u URB %p instead of stream %u URB %p\n",
-					 td->urb->stream_id, td->urb,
-					 cached_td->urb->stream_id, cached_td->urb);
			cached_td = td;
			break;
		}
@@ -1057,10 +1044,16 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
	if (err) {
		/* Failed to move past cached td, just set cached TDs to no-op */
		list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
-			if (td->cancel_status != TD_CLEARING_CACHE)
+			/*
+			 * Deferred TDs need to have the deq pointer set after the above command
+			 * completes, so if that failed we just give up on all of them (and
+			 * complain loudly since this could cause issues due to caching).
+			 */
+			if (td->cancel_status != TD_CLEARING_CACHE &&
+			    td->cancel_status != TD_CLEARING_CACHE_DEFERRED)
				continue;
-			xhci_dbg(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
-				 td->urb);
+			xhci_warn(xhci, "Failed to clear cancelled cached URB %p, mark clear anyway\n",
+				  td->urb);
			td_to_noop(xhci, ring, td, false);
			td->cancel_status = TD_CLEARED;
		}
@@ -1346,6 +1339,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_td *td, *tmp_td;
+	bool deferred = false;

	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
@@ -1432,6 +1426,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
			xhci_dbg(ep->xhci, "%s: Giveback cancelled URB %p TD\n",
				 __func__, td->urb);
			xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+		} else if (td->cancel_status == TD_CLEARING_CACHE_DEFERRED) {
+			deferred = true;
		} else {
			xhci_dbg(ep->xhci, "%s: Keep cancelled URB %p TD as cancel_status is %d\n",
				 __func__, td->urb, td->cancel_status);
@@ -1441,8 +1437,17 @@ cleanup:
	ep->ep_state &= ~SET_DEQ_PENDING;
	ep->queued_deq_seg = NULL;
	ep->queued_deq_ptr = NULL;
-	/* Restart any rings with pending URBs */
-	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+
+	if (deferred) {
+		/* We have more streams to clear */
+		xhci_dbg(ep->xhci, "%s: Pending TDs to clear, continuing with invalidation\n",
+			 __func__);
+		xhci_invalidate_cancelled_tds(ep);
+	} else {
+		/* Restart any rings with pending URBs */
+		xhci_dbg(ep->xhci, "%s: All TDs cleared, ring doorbell\n", __func__);
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+	}
 }

 static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
@@ -1479,8 +1484,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }

-static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
-		struct xhci_command *command, u32 cmd_comp_code)
+static void xhci_handle_cmd_enable_slot(int slot_id, struct xhci_command *command,
+					u32 cmd_comp_code)
 {
	if (cmd_comp_code == COMP_SUCCESS)
		command->slot_id = slot_id;
@@ -1505,8 +1510,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
 }

-static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
-		u32 cmd_comp_code)
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id)
 {
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
@@ -1748,14 +1752,14 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
	switch (cmd_type) {
	case TRB_ENABLE_SLOT:
-		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code);
+		xhci_handle_cmd_enable_slot(slot_id, cmd, cmd_comp_code);
		break;
	case TRB_DISABLE_SLOT:
		xhci_handle_cmd_disable_slot(xhci, slot_id);
		break;
	case TRB_CONFIG_EP:
		if (!cmd->completion)
-			xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
+			xhci_handle_cmd_config_ep(xhci, slot_id);
		break;
	case TRB_EVAL_CONTEXT:
		break;
@@ -1873,9 +1877,7 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
	} while (!(pll_lock_check & 0x1) && --retry_count);
 }

-static void handle_port_status(struct xhci_hcd *xhci,
-			       struct xhci_interrupter *ir,
-			       union xhci_trb *event)
+static void handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
 {
	struct usb_hcd *hcd;
	u32 port_id;
@@ -2124,30 +2126,34 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
	}
 }

-/* Check if an error has halted the endpoint ring. The class driver will
- * cleanup the halt for a non-default control endpoint if we indicate a stall.
- * However, a babble and other errors also halt the endpoint ring, and the class
- * driver won't clear the halt in that case, so we need to issue a Set Transfer
- * Ring Dequeue Pointer command manually.
+/*
+ * Check if xhci internal endpoint state has gone to a "halt" state due to an
+ * error or stall, including default control pipe protocol stall.
+ * The internal halt needs to be cleared with a reset endpoint command.
+ *
+ * External device side is also halted in functional stall cases. Class driver
+ * will clear the device halt with a CLEAR_FEATURE(ENDPOINT_HALT) request later.
 */
-static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
-		struct xhci_ep_ctx *ep_ctx,
-		unsigned int trb_comp_code)
+static bool xhci_halted_host_endpoint(struct xhci_ep_ctx *ep_ctx, unsigned int comp_code)
 {
-	/* TRB completion codes that may require a manual halt cleanup */
-	if (trb_comp_code == COMP_USB_TRANSACTION_ERROR ||
-	    trb_comp_code == COMP_BABBLE_DETECTED_ERROR ||
-	    trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR)
-		/* The 0.95 spec says a babbling control endpoint
-		 * is not halted. The 0.96 spec says it is. Some HW
-		 * claims to be 0.95 compliant, but it halts the control
-		 * endpoint anyway. Check if a babble halted the
-		 * endpoint.
+	/* Stall halts both internal and device side endpoint */
+	if (comp_code == COMP_STALL_ERROR)
+		return true;
+
+	/* TRB completion codes that may require internal halt cleanup */
+	if (comp_code == COMP_USB_TRANSACTION_ERROR ||
+	    comp_code == COMP_BABBLE_DETECTED_ERROR ||
+	    comp_code == COMP_SPLIT_TRANSACTION_ERROR)
+		/*
+		 * The 0.95 spec says a babbling control endpoint is not halted.
+		 * The 0.96 spec says it is. Some HW claims to be 0.95
+		 * compliant, but it halts the control endpoint anyway.
		 * Check endpoint context if endpoint is halted.
		 */
		if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED)
-			return 1;
+			return true;

-	return 0;
+	return false;
 }

 int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
@@ -2317,8 +2323,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
	case COMP_STOPPED_LENGTH_INVALID:
		goto finish_td;
	default:
-		if (!xhci_requires_manual_halt_cleanup(xhci,
-						       ep_ctx, trb_comp_code))
+		if (!xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
			 trb_comp_code, ep->ep_index);
@@ -2428,7 +2433,9 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
		requested = remaining;
		break;
	case COMP_STOPPED_LENGTH_INVALID:
-		requested = 0;
+		/* exclude stopped trb with invalid length from length sum */
+		sum_trbs_for_length = true;
+		ep_trb_len = 0;
		remaining = 0;
		break;
	default:
@@ -2524,9 +2531,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
		goto finish_td;
	case COMP_STOPPED_LENGTH_INVALID:
		/* stopped on ep trb with invalid length, exclude it */
-		ep_trb_len = 0;
-		remaining = 0;
-		break;
+		td->urb->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb);
+		goto finish_td;
	case COMP_USB_TRANSACTION_ERROR:
		if (xhci->quirks & XHCI_NO_SOFT_RETRY ||
		    (ep->err_count++ > MAX_SOFT_RETRY) ||
@@ -2558,6 +2564,33 @@ finish_td:
	return finish_td(xhci, ep, ep_ring, td, trb_comp_code);
 }

+/* Transfer events which don't point to a transfer TRB, see xhci 4.17.4 */
+static int handle_transferless_tx_event(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+					u32 trb_comp_code)
+{
+	switch (trb_comp_code) {
+	case COMP_STALL_ERROR:
+	case COMP_USB_TRANSACTION_ERROR:
+	case COMP_INVALID_STREAM_TYPE_ERROR:
+	case COMP_INVALID_STREAM_ID_ERROR:
+		xhci_dbg(xhci, "Stream transaction error ep %u no id\n", ep->ep_index);
+		if (ep->err_count++ > MAX_SOFT_RETRY)
+			xhci_handle_halted_endpoint(xhci, ep, NULL, EP_HARD_RESET);
+		else
+			xhci_handle_halted_endpoint(xhci, ep, NULL, EP_SOFT_RESET);
+		break;
+	case COMP_RING_UNDERRUN:
+	case COMP_RING_OVERRUN:
+	case COMP_STOPPED_LENGTH_INVALID:
+		break;
+	default:
+		xhci_err(xhci, "Transfer event %u for unknown stream ring slot %u ep %u\n",
+			 trb_comp_code, ep->vdev->slot_id, ep->ep_index);
+		return -ENODEV;
+	}
+	return 0;
+}
+
 /*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -2578,7 +2611,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;
-	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
@@ -2601,37 +2633,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		goto err_out;
	}

-	/* Some transfer events don't always point to a trb, see xhci 4.17.4 */
-	if (!ep_ring) {
-		switch (trb_comp_code) {
-		case COMP_STALL_ERROR:
-		case COMP_USB_TRANSACTION_ERROR:
-		case COMP_INVALID_STREAM_TYPE_ERROR:
-		case COMP_INVALID_STREAM_ID_ERROR:
-			xhci_dbg(xhci, "Stream transaction error ep %u no id\n",
-				 ep_index);
-			if (ep->err_count++ > MAX_SOFT_RETRY)
-				xhci_handle_halted_endpoint(xhci, ep, NULL,
-							    EP_HARD_RESET);
-			else
-				xhci_handle_halted_endpoint(xhci, ep, NULL,
-							    EP_SOFT_RESET);
-			break;
-		case COMP_RING_UNDERRUN:
-		case COMP_RING_OVERRUN:
-		case COMP_STOPPED_LENGTH_INVALID:
-			break;
-		default:
-			xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n",
-				 slot_id, ep_index);
-			goto err_out;
-		}
-		return 0;
-	}
-
-	/* Count current td numbers if ep->skip is set */
-	if (ep->skip)
-		td_num += list_count_nodes(&ep_ring->td_list);
+	if (!ep_ring)
+		return handle_transferless_tx_event(xhci, ep, trb_comp_code);

	/* Look for common error cases */
	switch (trb_comp_code) {
@@ -2713,18 +2716,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
-		xhci_dbg(xhci, "underrun event on endpoint\n");
-		if (!list_empty(&ep_ring->td_list))
-			xhci_dbg(xhci, "Underrun Event for slot %u ep %d still with TDs queued?\n",
-				 slot_id, ep_index);
+		xhci_dbg(xhci, "Underrun event on slot %u ep %u\n", slot_id, ep_index);
		if (ep->skip)
			break;
		return 0;
	case COMP_RING_OVERRUN:
-		xhci_dbg(xhci, "overrun event on endpoint\n");
-		if (!list_empty(&ep_ring->td_list))
-			xhci_dbg(xhci, "Overrun Event for slot %u ep %d still with TDs queued?\n",
-				 slot_id, ep_index);
+		xhci_dbg(xhci, "Overrun event on slot %u ep %u\n", slot_id, ep_index);
		if (ep->skip)
			break;
		return 0;
@@ -2791,44 +2788,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			xhci_dbg(xhci, "td_list is empty while skip flag set. Clear skip flag for slot %u ep %u.\n",
				 slot_id, ep_index);
		}
-		if (trb_comp_code == COMP_STALL_ERROR ||
-		    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
-						      trb_comp_code)) {
-			xhci_handle_halted_endpoint(xhci, ep, NULL,
-						    EP_HARD_RESET);
-		}
-		return 0;
-	}
-
-	/* We've skipped all the TDs on the ep ring when ep->skip set */
-	if (ep->skip && td_num == 0) {
-		ep->skip = false;
-		xhci_dbg(xhci, "All tds on the ep_ring skipped. Clear skip flag for slot %u ep %u.\n",
-			 slot_id, ep_index);
-		return 0;
+		td = NULL;
+		goto check_endpoint_halted;
	}

	td = list_first_entry(&ep_ring->td_list, struct xhci_td, td_list);
-	if (ep->skip)
-		td_num--;

	/* Is this a TRB in the currently executing TD? */
	ep_seg = trb_in_td(xhci, td, ep_trb_dma, false);

-	/*
-	 * Skip the Force Stopped Event. The event_trb(event_dma) of FSE
-	 * is not in the current TD pointed by ep_ring->dequeue because
-	 * that the hardware dequeue pointer still at the previous TRB
-	 * of the current TD. The previous TRB maybe a Link TD or the
-	 * last TRB of the previous TD. The command completion handle
-	 * will take care the rest.
-	 */
-	if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
-			trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
-		continue;
-	}
-
	if (!ep_seg) {

		if (ep->skip && usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
@@ -2837,6 +2807,18 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		}

		/*
+		 * Skip the Force Stopped Event. The 'ep_trb' of FSE is not in the current
+		 * TD pointed by 'ep_ring->dequeue' because that the hardware dequeue
+		 * pointer still at the previous TRB of the current TD. The previous TRB
+		 * maybe a Link TD or the last TRB of the previous TD. The command
+		 * completion handle will take care the rest.
+		 */
+		if (trb_comp_code == COMP_STOPPED ||
+		    trb_comp_code == COMP_STOPPED_LENGTH_INVALID) {
+			return 0;
+		}
+
+		/*
		 * Some hosts give a spurious success event after a short
		 * transfer. Ignore it.
		 */
@@ -2885,10 +2867,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
			return -ESHUTDOWN;
		}
	}
-	if (trb_comp_code == COMP_SHORT_PACKET)
-		ep_ring->last_td_was_short = true;
-	else
-		ep_ring->last_td_was_short = false;

	if (ep->skip) {
		xhci_dbg(xhci,
@@ -2897,37 +2875,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
		ep->skip = false;
	}

-	ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) /
-					sizeof(*ep_trb)];
-
-	trace_xhci_handle_transfer(ep_ring,
-			(struct xhci_generic_trb *) ep_trb);
-
-	/*
-	 * No-op TRB could trigger interrupts in a case where
-	 * a URB was killed and a STALL_ERROR happens right
-	 * after the endpoint ring stopped. Reset the halted
-	 * endpoint. Otherwise, the endpoint remains stalled
-	 * indefinitely.
-	 */
-
-	if (trb_is_noop(ep_trb)) {
-		if (trb_comp_code == COMP_STALL_ERROR ||
-		    xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
-						      trb_comp_code))
-			xhci_handle_halted_endpoint(xhci, ep, td,
-						    EP_HARD_RESET);
-	} else {
-		td->status = status;
-
-		/* update the urb's actual_length and give back to the core */
-		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
-			process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
-		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
-			process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
-		else
-			process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
-	}
	/*
	 * If ep->skip is set, it means there are missed tds on the
	 * endpoint ring need to take care of.
	 * Process them as short transfer until reach the td pointed by
	 * the event.
	 */
	} while (ep->skip);

@@ -2936,6 +2883,38 @@ static int handle_tx_event(struct xhci_hcd *xhci,
+	if (trb_comp_code == COMP_SHORT_PACKET)
+		ep_ring->last_td_was_short = true;
+	else
+		ep_ring->last_td_was_short = false;
+
+	ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma) / sizeof(*ep_trb)];
+	trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb);
+
+	/*
+	 * No-op TRB could trigger interrupts in a case where a URB was killed
+	 * and a STALL_ERROR happens right after the endpoint ring stopped.
+	 * Reset the halted endpoint. Otherwise, the endpoint remains stalled
+	 * indefinitely.
+	 */
+
+	if (trb_is_noop(ep_trb))
+		goto check_endpoint_halted;
+
+	td->status = status;
+
+	/* update the urb's actual_length and give back to the core */
+	if (usb_endpoint_xfer_control(&td->urb->ep->desc))
+		process_ctrl_td(xhci, ep, ep_ring, td, ep_trb, event);
+	else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
+		process_isoc_td(xhci, ep, ep_ring, td, ep_trb, event);
+	else
+		process_bulk_intr_td(xhci, ep, ep_ring, td, ep_trb, event);
+
+check_endpoint_halted:
+	if (xhci_halted_host_endpoint(ep_ctx, trb_comp_code))
+		xhci_handle_halted_endpoint(xhci, ep, td, EP_HARD_RESET);
+
	return 0;

 err_out:
@@ -2974,7 +2953,7 @@ static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_PORT_STATUS:
-		handle_port_status(xhci, ir, event);
+		handle_port_status(xhci, event);
		break;
	case TRB_TRANSFER:
		handle_tx_event(xhci, ir, &event->trans_event);
@@ -3034,8 +3013,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
 }

 /* Clear the interrupt pending bit for a specific interrupter. */
-static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
-					 struct xhci_interrupter *ir)
+static void xhci_clear_interrupt_pending(struct xhci_interrupter *ir)
 {
	if (!ir->ip_autoclear) {
		u32 irq_pending;
@@ -3056,7 +3034,7 @@ static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir
	int err;
	u64 temp;

-	xhci_clear_interrupt_pending(xhci, ir);
+	xhci_clear_interrupt_pending(ir);

	/* Event ring hasn't been allocated yet. */
	if (!ir->event_ring || !ir->event_ring->dequeue) {
@@ -3229,7 +3207,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,

	if (ep_ring != xhci->cmd_ring) {
		new_segs = xhci_ring_expansion_needed(xhci, ep_ring, num_trbs);
-	} else if (xhci_num_trbs_free(xhci, ep_ring) <= num_trbs) {
+	} else if (xhci_num_trbs_free(ep_ring) <= num_trbs) {
		xhci_err(xhci, "Do not support expand command ring\n");
		return -ENOMEM;
	}
@@ -3247,9 +3225,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
-			if (!xhci_link_trb_quirk(xhci) &&
-			    !(ep_ring->type == TYPE_ISOC &&
-			      (xhci->quirks & XHCI_AMD_0x96_HOST)))
+			if (!xhci_link_chain_quirk(xhci, ep_ring->type))
				ep_ring->enqueue->link.control &=
					cpu_to_le32(~TRB_CHAIN);
			else
@@ -3404,8 +3380,7 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
 }

-static void check_interval(struct xhci_hcd *xhci, struct urb *urb,
-			   struct xhci_ep_ctx *ep_ctx)
+static void check_interval(struct urb *urb, struct xhci_ep_ctx *ep_ctx)
 {
	int xhci_interval;
	int ep_interval;
@@ -3446,7 +3421,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
	struct xhci_ep_ctx *ep_ctx;

	ep_ctx = xhci_get_ep_ctx(xhci, xhci->devs[slot_id]->out_ctx, ep_index);
-	check_interval(xhci, urb, ep_ctx);
+	check_interval(urb, ep_ctx);

	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
@@ -3692,7 +3667,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			  upper_32_bits(send_addr),
			  length_field,
			  field);
-		td->num_trbs++;

		addr += trb_buff_len;
		sent_len = trb_buff_len;
@@ -3719,7 +3693,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		urb_priv->td[1].last_trb_seg = ring->enq_seg;
		field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
		queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
-		urb_priv->td[1].num_trbs++;
	}

	check_trb_math(urb, enqd_len);
@@ -3770,7 +3743,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,

	urb_priv = urb->hcpriv;
	td = &urb_priv->td[0];
-	td->num_trbs = num_trbs;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -4091,7 +4063,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
			goto cleanup;
		}
		td = &urb_priv->td[i];
-		td->num_trbs = trbs_per_td;
		/* use SIA as default, if frame id is used overwrite it */
		sia_frame_id = TRB_SIA;
		if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -4255,7 +4226,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
	 * Check interval value. This should be done before we start to
	 * calculate the start frame value.
	 */
-	check_interval(xhci, urb, ep_ctx);
+	check_interval(urb, ep_ctx);

	/* Calculate the start frame and put it in urb->start_frame. */
	if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) {
@@ -4408,7 +4379,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
			     int slot_id, unsigned int ep_index, int suspend)
 {
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
-	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

@@ -4421,7 +4392,7 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
			enum xhci_ep_reset_type reset_type)
 {
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
-	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
+	u32 trb_ep_index = EP_INDEX_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	if (reset_type == EP_SOFT_RESET)
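
Note: xhci_link_chain_quirk(), called at the two sites above, is not defined in this file; it lands outside this diff (next to the old xhci_link_trb_quirk() helper in xhci.h). Judging only from the open-coded checks it replaces, it presumably folds the 0.95-hardware link-TRB quirk and the AMD 0.96 isoc-ring special case into a single predicate, roughly as sketched here (an assumption-based sketch, not part of this patch):

static inline bool xhci_link_chain_quirk(struct xhci_hcd *xhci, enum xhci_ring_type type)
{
	/*
	 * Sketch of the presumed helper: the chain bit must be carried over
	 * link TRBs on 0.95 hosts (XHCI_LINK_TRB_QUIRK, previously tested via
	 * xhci_link_trb_quirk()) and on isoc rings of AMD 0.96 hosts
	 * (XHCI_AMD_0x96_HOST); both checks were open-coded at the call sites
	 * in inc_enq() and prepare_ring() before this change.
	 */
	return (xhci->quirks & XHCI_LINK_TRB_QUIRK) ||
	       (type == TYPE_ISOC && (xhci->quirks & XHCI_AMD_0x96_HOST));
}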