From: Mathias Nyman <mathias.nyman@linux.intel.com>
Expose xhci_stop_endpoint_sync(), a synchronous variant of xhci_queue_stop_endpoint(). This is useful for client drivers that use secondary interrupters and need to stop/clean up the current session. The stop endpoint command handler will also take care of cleaning up the ring.
Modifications to repurpose the new API into the existing stop endpoint sequences were implemented by Wesley Cheng.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
---
 drivers/usb/host/xhci.c | 61 ++++++++++++++++++++++++++++++-----------
 drivers/usb/host/xhci.h |  2 ++
 2 files changed, 47 insertions(+), 16 deletions(-)
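
For illustration only (not part of this patch): a minimal sketch of how a client driver that owns a secondary interrupter might call the new helper when tearing down its session. The function name example_teardown_offload_ep is hypothetical, the caller is assumed to have already resolved the xhci_hcd and xhci_virt_ep pointers through its own offload bookkeeping, and sleeping context is assumed (hence GFP_KERNEL); it also needs the xHCI private header.

/* Sketch only: requires the xHCI private header drivers/usb/host/xhci.h. */
#include "xhci.h"

static int example_teardown_offload_ep(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep)
{
	int ret;

	/*
	 * Stop the endpoint and wait for the Stop Endpoint command to
	 * complete; the command handler also cleans up the transfer ring.
	 */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_KERNEL);
	if (ret < 0)
		xhci_warn(xhci, "failed to stop offloaded endpoint: %d\n", ret);

	return ret;
}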
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c7b888d44fc..6465acca7c79 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2760,6 +2760,47 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	return -ENOMEM;
 }
 
+/*
+ * Synchronous XHCI stop endpoint helper. Issues the stop endpoint command and
+ * waits for the command completion before returning.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend,
+			    gfp_t gfp_flags)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	int ret;
+
+	command = xhci_alloc_command(xhci, true, gfp_flags);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+				       ep->ep_index, suspend);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto out;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ret = wait_for_completion_timeout(command->completion, msecs_to_jiffies(3000));
+	if (!ret)
+		xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
+			  __func__);
+
+	if (command->status == COMP_COMMAND_ABORTED ||
+	    command->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+out:
+	xhci_free_command(xhci, command);
+
+	return ret;
+}
+
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
@@ -3080,7 +3121,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	struct xhci_virt_device *vdev;
 	struct xhci_virt_ep *ep;
 	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_command *stop_cmd, *cfg_cmd;
+	struct xhci_command *cfg_cmd;
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
@@ -3120,10 +3161,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
 		return;
 
-	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
-	if (!stop_cmd)
-		return;
-
 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
 	if (!cfg_cmd)
 		goto cleanup;
@@ -3146,23 +3183,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		goto cleanup;
 	}
 
-	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
-					ep_index, 0);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	err = xhci_stop_endpoint_sync(xhci, ep, 0, GFP_NOWAIT);
 	if (err < 0) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		xhci_free_command(xhci, cfg_cmd);
 		xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
 				__func__, err);
 		goto cleanup;
 	}
 
-	xhci_ring_cmd_db(xhci);
-	spin_unlock_irqrestore(&xhci->lock, flags);
-
-	wait_for_completion(stop_cmd->completion);
-	spin_lock_irqsave(&xhci->lock, flags);
-
 	/* config ep command clears toggle if add and drop ep flags are set */
 	ctrl_ctx = xhci_get_input_control_ctx(cfg_cmd->in_ctx);
 	if (!ctrl_ctx) {
@@ -3194,7 +3224,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 
 	xhci_free_command(xhci, cfg_cmd);
 cleanup:
-	xhci_free_command(xhci, stop_cmd);
 	spin_lock_irqsave(&xhci->lock, flags);
 	if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
 		ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3bbb9f018529..f0e90fafdd19 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2174,6 +2174,8 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
 void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
 void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
 unsigned int count_trbs(u64 addr, u64 len);
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
+			    int suspend, gfp_t gfp_flags);
 
 /* xHCI roothub code */
 void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,