From b374487ebe8a91d03c07fe361af375e09285c3b0 Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 25 Nov 2013 16:17:16 -0600 Subject: usb: wusbcore: add calls to usb_hcd_link_urb_to_ep, usb_hcd_unlink_urb_from_ep, and Add calls to usb_hcd_link_urb_to_ep, usb_hcd_unlink_urb_from_ep, and usb_hcd_check_unlink_urb in the appropriate locations. Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 26 ++++++++++++++++++++++++-- 1 file changed, 24 insertions(+), 2 deletions(-) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index ed5abe87b049..5957e486591a 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -282,6 +282,7 @@ static void wa_xfer_giveback(struct wa_xfer *xfer) spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); list_del_init(&xfer->list_node); + usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb); spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); /* FIXME: segmentation broken -- kills DWA */ wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); @@ -1730,6 +1731,12 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, dump_stack(); } + spin_lock_irqsave(&wa->xfer_list_lock, my_flags); + result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb); + spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); + if (result < 0) + goto error_link_urb; + result = -ENOMEM; xfer = kzalloc(sizeof(*xfer), gfp); if (xfer == NULL) @@ -1769,6 +1776,9 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, __func__, result); wa_put(xfer->wa); wa_xfer_put(xfer); + spin_lock_irqsave(&wa->xfer_list_lock, my_flags); + usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); + spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); return result; } } @@ -1777,6 +1787,10 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep, error_dequeued: kfree(xfer); error_kmalloc: + spin_lock_irqsave(&wa->xfer_list_lock, my_flags); + usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb); + spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags); +error_link_urb: return result; } EXPORT_SYMBOL_GPL(wa_urb_enqueue); @@ -1799,7 +1813,7 @@ EXPORT_SYMBOL_GPL(wa_urb_enqueue); * asynch request] and then make sure we cancel each segment. * */ -int wa_urb_dequeue(struct wahc *wa, struct urb *urb) +int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) { unsigned long flags, flags2; struct wa_xfer *xfer; @@ -1807,6 +1821,14 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) struct wa_rpipe *rpipe; unsigned cnt, done = 0, xfer_abort_pending; unsigned rpipe_ready = 0; + int result; + + /* check if it is safe to unlink. 
*/ + spin_lock_irqsave(&wa->xfer_list_lock, flags); + result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status); + spin_unlock_irqrestore(&wa->xfer_list_lock, flags); + if (result) + return result; xfer = urb->hcpriv; if (xfer == NULL) { @@ -2172,7 +2194,7 @@ error_complete: error_bad_seg: spin_unlock_irqrestore(&xfer->lock, flags); - wa_urb_dequeue(wa, xfer->urb); + wa_urb_dequeue(wa, xfer->urb, -ENOENT); if (printk_ratelimit()) dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx); if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { -- cgit v1.2.3 From bbfc34201fffd8a41c2ecbad2b8fb3bf00d7ee74 Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 25 Nov 2013 16:17:17 -0600 Subject: usb: wusbcore: add more info to debug prints in urb_unlink path Add more info to debug prints in urb_unlink path Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 5957e486591a..3220c62de317 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -373,10 +373,10 @@ static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) seg->result); goto out; case WA_SEG_ABORTED: - dev_dbg(dev, "xfer %p ID %08X#%u ABORTED: result %d\n", - xfer, wa_xfer_id(xfer), seg->index, - urb->status); - xfer->result = urb->status; + xfer->result = seg->result; + dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zu(0x%08zX)\n", + xfer, wa_xfer_id(xfer), seg->index, seg->result, + seg->result); goto out; default: dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n", @@ -1568,7 +1568,8 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer) wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev); if (wusb_dev == NULL) { mutex_unlock(&wusbhc->mutex); - pr_err("%s: error wusb dev gone\n", __func__); + dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n", + __func__); goto error_dev_gone; } mutex_unlock(&wusbhc->mutex); @@ -1577,18 +1578,18 @@ static int wa_urb_enqueue_b(struct wa_xfer *xfer) xfer->wusb_dev = wusb_dev; result = urb->status; if (urb->status != -EINPROGRESS) { - pr_err("%s: error_dequeued\n", __func__); + dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__); goto error_dequeued; } result = __wa_xfer_setup(xfer, urb); if (result < 0) { - pr_err("%s: error_xfer_setup\n", __func__); + dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__); goto error_xfer_setup; } result = __wa_xfer_submit(xfer); if (result < 0) { - pr_err("%s: error_xfer_submit\n", __func__); + dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__); goto error_xfer_submit; } spin_unlock_irqrestore(&xfer->lock, flags); @@ -1844,8 +1845,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer)); rpipe = xfer->ep->hcpriv; if (rpipe == NULL) { - pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", - __func__, wa_xfer_id(xfer), + pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s", + __func__, xfer, wa_xfer_id(xfer), "Probably already aborted.\n" ); goto out_unlock; } -- cgit v1.2.3 From e05a1fd9468bc99bf67bd81601d46d84d93c21c8 Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 25 Nov 2013 16:17:18 -0600 Subject: usb: wusbcore: return -ENOENT for unlinked URBs. Return -ENOENT for unlinked URBs. 
Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 3220c62de317..a88b8c68ce78 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1848,6 +1848,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) pr_debug("%s: xfer %p id 0x%08X has no RPIPE. %s", __func__, xfer, wa_xfer_id(xfer), "Probably already aborted.\n" ); + result = -ENOENT; goto out_unlock; } /* Check the delayed list -> if there, release and complete */ @@ -1878,6 +1879,7 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) * segments will be completed in the DTI interrupt. */ seg->status = WA_SEG_ABORTED; + seg->result = -ENOENT; spin_lock_irqsave(&rpipe->seg_lock, flags2); list_del(&seg->list_node); xfer->segs_done++; @@ -1917,12 +1919,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status) wa_xfer_completion(xfer); if (rpipe_ready) wa_xfer_delayed_run(rpipe); - return 0; + return result; out_unlock: spin_unlock_irqrestore(&xfer->lock, flags); out: - return 0; + return result; dequeue_delayed: list_del_init(&xfer->list_node); @@ -1958,7 +1960,7 @@ static int wa_xfer_status_to_errno(u8 status) [WA_XFER_STATUS_NOT_FOUND] = 0, [WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM, [WA_XFER_STATUS_TRANSACTION_ERROR] = -EILSEQ, - [WA_XFER_STATUS_ABORTED] = -EINTR, + [WA_XFER_STATUS_ABORTED] = -ENOENT, [WA_XFER_STATUS_RPIPE_NOT_READY] = EINVAL, [WA_XFER_INVALID_FORMAT] = EINVAL, [WA_XFER_UNEXPECTED_SEGMENT_NUMBER] = EINVAL, -- cgit v1.2.3 From 7005234c18f233d3613b09e9ce4b6ce6977bf246 Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 9 Dec 2013 13:10:41 -0600 Subject: usb: wusbcore: fix short transfers If a URB is broken up into multiple transfer segments and a short transfer occurs in any segment other than the last, the URB will currently get stuck in the driver forever. This patch adds a check for a short transfer and cleans up any pending segments so the URB can complete properly. Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 128 ++++++++++++++++++++++++----------------- 1 file changed, 74 insertions(+), 54 deletions(-) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index a88b8c68ce78..673ad80c1b55 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1993,7 +1993,7 @@ static int wa_xfer_status_to_errno(u8 status) * the xfer will complete cleanly. */ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, - struct wa_seg *incoming_seg) + struct wa_seg *incoming_seg, enum wa_seg_status status) { int index; struct wa_rpipe *rpipe = xfer->ep->hcpriv; @@ -2015,7 +2015,7 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, */ case WA_SEG_DELAYED: xfer->segs_done++; - current_seg->status = incoming_seg->status; + current_seg->status = status; break; case WA_SEG_ABORTED: break; @@ -2028,6 +2028,58 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer, } } +/* Populate the wa->buf_in_urb based on the current transfer state. 
*/ +static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer, + unsigned int seg_idx, unsigned int bytes_transferred) +{ + int result = 0; + struct wa_seg *seg = xfer->seg[seg_idx]; + + BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); + /* this should always be 0 before a resubmit. */ + wa->buf_in_urb->num_mapped_sgs = 0; + + if (xfer->is_dma) { + wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma + + (seg_idx * xfer->seg_size); + wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + wa->buf_in_urb->transfer_buffer = NULL; + wa->buf_in_urb->sg = NULL; + wa->buf_in_urb->num_sgs = 0; + } else { + /* do buffer or SG processing. */ + wa->buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP; + + if (xfer->urb->transfer_buffer) { + wa->buf_in_urb->transfer_buffer = + xfer->urb->transfer_buffer + + (seg_idx * xfer->seg_size); + wa->buf_in_urb->sg = NULL; + wa->buf_in_urb->num_sgs = 0; + } else { + /* allocate an SG list to store seg_size bytes + and copy the subset of the xfer->urb->sg + that matches the buffer subset we are + about to read. */ + wa->buf_in_urb->sg = wa_xfer_create_subset_sg( + xfer->urb->sg, + seg_idx * xfer->seg_size, + bytes_transferred, + &(wa->buf_in_urb->num_sgs)); + + if (!(wa->buf_in_urb->sg)) { + wa->buf_in_urb->num_sgs = 0; + result = -ENOMEM; + } + wa->buf_in_urb->transfer_buffer = NULL; + } + } + wa->buf_in_urb->transfer_buffer_length = bytes_transferred; + wa->buf_in_urb->context = seg; + + return result; +} + /* * Process a xfer result completion message * @@ -2041,12 +2093,13 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, int result; struct device *dev = &wa->usb_iface->dev; unsigned long flags; - u8 seg_idx; + unsigned int seg_idx; struct wa_seg *seg; struct wa_rpipe *rpipe; unsigned done = 0; u8 usb_status; unsigned rpipe_ready = 0; + unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength); spin_lock_irqsave(&xfer->lock, flags); seg_idx = xfer_result->bTransferSegment & 0x7f; @@ -2079,66 +2132,33 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer, /* FIXME: we ignore warnings, tally them for stats */ if (usb_status & 0x40) /* Warning?... */ usb_status = 0; /* ... pass */ + /* + * If the last segment bit is set, complete the remaining segments. + * When the current segment is completed, either in wa_buf_in_cb for + * transfers with data or below for no data, the xfer will complete. + */ + if (xfer_result->bTransferSegment & 0x80) + wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE); if (usb_pipeisoc(xfer->urb->pipe)) { /* set up WA state to read the isoc packet status next. */ wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer); wa->dti_isoc_xfer_seg = seg_idx; wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING; - } else if (xfer->is_inbound) { /* IN data phase: read to buffer */ + } else if ((xfer->is_inbound) + && (bytes_transferred > 0)) { + /* IN data phase: read to buffer */ seg->status = WA_SEG_DTI_PENDING; - BUG_ON(wa->buf_in_urb->status == -EINPROGRESS); - /* this should always be 0 before a resubmit. */ - wa->buf_in_urb->num_mapped_sgs = 0; - - if (xfer->is_dma) { - wa->buf_in_urb->transfer_dma = - xfer->urb->transfer_dma - + (seg_idx * xfer->seg_size); - wa->buf_in_urb->transfer_flags - |= URB_NO_TRANSFER_DMA_MAP; - wa->buf_in_urb->transfer_buffer = NULL; - wa->buf_in_urb->sg = NULL; - wa->buf_in_urb->num_sgs = 0; - } else { - /* do buffer or SG processing. 
*/ - wa->buf_in_urb->transfer_flags - &= ~URB_NO_TRANSFER_DMA_MAP; - - if (xfer->urb->transfer_buffer) { - wa->buf_in_urb->transfer_buffer = - xfer->urb->transfer_buffer - + (seg_idx * xfer->seg_size); - wa->buf_in_urb->sg = NULL; - wa->buf_in_urb->num_sgs = 0; - } else { - /* allocate an SG list to store seg_size bytes - and copy the subset of the xfer->urb->sg - that matches the buffer subset we are - about to read. */ - wa->buf_in_urb->sg = wa_xfer_create_subset_sg( - xfer->urb->sg, - seg_idx * xfer->seg_size, - le32_to_cpu( - xfer_result->dwTransferLength), - &(wa->buf_in_urb->num_sgs)); - - if (!(wa->buf_in_urb->sg)) { - wa->buf_in_urb->num_sgs = 0; - goto error_sg_alloc; - } - wa->buf_in_urb->transfer_buffer = NULL; - } - } - wa->buf_in_urb->transfer_buffer_length = - le32_to_cpu(xfer_result->dwTransferLength); - wa->buf_in_urb->context = seg; + result = wa_populate_buf_in_urb(wa, xfer, seg_idx, + bytes_transferred); + if (result < 0) + goto error_buf_in_populate; result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC); if (result < 0) goto error_submit_buf_in; } else { - /* OUT data phase, complete it -- */ + /* OUT data phase or no data, complete it -- */ seg->status = WA_SEG_DONE; - seg->result = le32_to_cpu(xfer_result->dwTransferLength); + seg->result = bytes_transferred; xfer->segs_done++; rpipe_ready = rpipe_avail_inc(rpipe); done = __wa_xfer_is_done(xfer); @@ -2162,13 +2182,13 @@ error_submit_buf_in: seg->result = result; kfree(wa->buf_in_urb->sg); wa->buf_in_urb->sg = NULL; -error_sg_alloc: +error_buf_in_populate: __wa_xfer_abort(xfer); seg->status = WA_SEG_ERROR; error_complete: xfer->segs_done++; rpipe_ready = rpipe_avail_inc(rpipe); - wa_complete_remaining_xfer_segs(xfer, seg); + wa_complete_remaining_xfer_segs(xfer, seg, seg->status); done = __wa_xfer_is_done(xfer); /* * queue work item to clear STALL for control endpoints. -- cgit v1.2.3 From ea1af42d3d4da73c9d75984f24e569515261b3fd Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 9 Dec 2013 14:15:14 -0600 Subject: usb: wusbcore: move isoc_frame_index from wa_xfer to wa_seg If multiple segments belonging to an isoc transfer are submitted concurrently, the isoc_frame_index field in struct wa_xfer can get corrupted. This patch moves the isoc_frame_index field from struct wa_xfer to struct wa_seg to prevent this from happening. Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 673ad80c1b55..6aeb52cdc3fb 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -124,6 +124,8 @@ struct wa_seg { u8 index; /* which segment we are */ int isoc_frame_count; /* number of isoc frames in this segment. */ int isoc_frame_offset; /* starting frame offset in the xfer URB. */ + /* Isoc frame that the current transfer buffer corresponds to. */ + int isoc_frame_index; int isoc_size; /* size of all isoc frames sent by this seg. */ enum wa_seg_status status; ssize_t result; /* bytes xfered or error */ @@ -158,8 +160,6 @@ struct wa_xfer { unsigned is_dma:1; size_t seg_size; int result; - /* Isoc frame that the current transfer buffer corresponds to. 
*/ - int dto_isoc_frame_index; gfp_t gfp; /* allocation mask */ @@ -701,23 +701,23 @@ static void wa_seg_dto_cb(struct urb *urb) if (usb_pipeisoc(xfer->urb->pipe)) { /* Alereon HWA sends all isoc frames in a single transfer. */ if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) - xfer->dto_isoc_frame_index += seg->isoc_frame_count; + seg->isoc_frame_index += seg->isoc_frame_count; else - xfer->dto_isoc_frame_index += 1; - if (xfer->dto_isoc_frame_index < seg->isoc_frame_count) { + seg->isoc_frame_index += 1; + if (seg->isoc_frame_index < seg->isoc_frame_count) { data_send_done = 0; holding_dto = 1; /* checked in error cases. */ /* * if this is the last isoc frame of the segment, we * can release DTO after sending this frame. */ - if ((xfer->dto_isoc_frame_index + 1) >= + if ((seg->isoc_frame_index + 1) >= seg->isoc_frame_count) release_dto = 1; } dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n", - wa_xfer_id(xfer), seg->index, - xfer->dto_isoc_frame_index, holding_dto, release_dto); + wa_xfer_id(xfer), seg->index, seg->isoc_frame_index, + holding_dto, release_dto); } spin_unlock_irqrestore(&xfer->lock, flags); @@ -737,8 +737,7 @@ static void wa_seg_dto_cb(struct urb *urb) * send the URB and release DTO if we no longer need it. */ __wa_populate_dto_urb_isoc(xfer, seg, - seg->isoc_frame_offset + - xfer->dto_isoc_frame_index); + seg->isoc_frame_offset + seg->isoc_frame_index); /* resubmit the URB with the next isoc frame. */ result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC); @@ -1324,12 +1323,12 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer, struct wahc *wa = xfer->wa; result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC); + seg->isoc_frame_index = 0; if (result < 0) { pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n", __func__, xfer, seg->index, result); goto error_iso_pack_desc_submit; } - xfer->dto_isoc_frame_index = 0; /* * If this segment contains more than one isoc frame, hold * onto the dto resource until we send all frames. -- cgit v1.2.3 From 756a2eed67e61e9596c2b49a787441c2e0daf1e1 Mon Sep 17 00:00:00 2001 From: Thomas Pugliese Date: Mon, 9 Dec 2013 14:15:15 -0600 Subject: usb: wusbcore: set packet count correctly on isoc transfers This patch correctly sets the dwNumOfPackets field of the HWA transfer request for isochronous transfers with multiple segments. Previously all segments used the value that was set for the first segment which may not be correct. Signed-off-by: Thomas Pugliese Signed-off-by: Greg Kroah-Hartman --- drivers/usb/wusbcore/wa-xfer.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/usb/wusbcore/wa-xfer.c') diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 6aeb52cdc3fb..a70e142da330 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c @@ -1259,8 +1259,11 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) for (cnt = 1; cnt < xfer->segs; cnt++) { struct wa_xfer_packet_info_hwaiso *packet_desc; struct wa_seg *seg = xfer->seg[cnt]; + struct wa_xfer_hwaiso *xfer_iso; xfer_hdr = &seg->xfer_hdr; + xfer_iso = container_of(xfer_hdr, + struct wa_xfer_hwaiso, hdr); packet_desc = ((void *)xfer_hdr) + xfer_hdr_size; /* * Copy values from the 0th header. 
Segment specific @@ -1270,6 +1273,8 @@ static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb) xfer_hdr->bTransferSegment = cnt; xfer_hdr->dwTransferLength = cpu_to_le32(seg->isoc_size); + xfer_iso->dwNumOfPackets = + cpu_to_le32(seg->isoc_frame_count); __wa_setup_isoc_packet_descr(packet_desc, xfer, seg); seg->status = WA_SEG_READY; } -- cgit v1.2.3
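
For reference, the URB bookkeeping pattern that the first patch in this series wires into wa-xfer.c (link the URB to its endpoint on enqueue, veto invalid unlinks on dequeue, and unlink the URB from the endpoint before giveback) looks roughly as follows in a generic host controller driver. This is an illustrative sketch only, not code from the series: the my_hcd_* names and my_hcd_lock are hypothetical stand-ins for wa_urb_enqueue(), wa_urb_dequeue(), wa_xfer_giveback() and wa->xfer_list_lock, while usb_hcd_link_urb_to_ep(), usb_hcd_check_unlink_urb(), usb_hcd_unlink_urb_from_ep() and usb_hcd_giveback_urb() are the existing usbcore helpers declared in <linux/usb/hcd.h>.

/*
 * Illustrative sketch (not part of the patches above): the generic HCD
 * enqueue/dequeue/giveback pattern that the first patch applies to
 * wa-xfer.c.  The my_hcd_* names and my_hcd_lock are hypothetical;
 * the usb_hcd_*() helpers are the real ones from <linux/usb/hcd.h>.
 */
#include <linux/spinlock.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>

static DEFINE_SPINLOCK(my_hcd_lock);	/* stands in for wa->xfer_list_lock */

static int my_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			      gfp_t mem_flags)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&my_hcd_lock, flags);
	/* Register the URB with usbcore before queueing any hardware work. */
	result = usb_hcd_link_urb_to_ep(hcd, urb);
	spin_unlock_irqrestore(&my_hcd_lock, flags);
	if (result < 0)
		return result;

	/* ... queue the transfer to the hardware here (may use mem_flags) ... */

	return 0;
}

static int my_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int result;

	spin_lock_irqsave(&my_hcd_lock, flags);
	/* Refuse the unlink if this URB is not currently queued on this HCD. */
	result = usb_hcd_check_unlink_urb(hcd, urb, status);
	spin_unlock_irqrestore(&my_hcd_lock, flags);
	if (result)
		return result;

	/* ... abort the transfer on the hardware; completion gives the URB back ... */

	return 0;
}

static void my_hcd_giveback(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;

	spin_lock_irqsave(&my_hcd_lock, flags);
	/* Drop the URB from the endpoint's list before handing it back. */
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	spin_unlock_irqrestore(&my_hcd_lock, flags);

	usb_hcd_giveback_urb(hcd, urb, status);
}

Taking the same lock around each helper keeps usbcore's per-endpoint URB list consistent with the driver's own transfer list, which is why the patches above hold wa->xfer_list_lock across each of these calls.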