Hi Mathias,
On 9/28/2023 6:31 AM, Mathias Nyman wrote:
On 22.9.2023 0.48, Wesley Cheng wrote:
From: Mathias Nyman <mathias.nyman@linux.intel.com>
Expose xhci_stop_endpoint_sync(), a synchronous variant of xhci_queue_stop_endpoint(). This is useful for client drivers that use secondary interrupters and need to stop/clean up the current session. The stop endpoint command handler will also take care of cleaning up the ring.
Modifications to fold the new API into the existing stop endpoint sequences were implemented by Wesley Cheng.
Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
Co-developed-by: Wesley Cheng <quic_wcheng@quicinc.com>
Signed-off-by: Wesley Cheng <quic_wcheng@quicinc.com>
 drivers/usb/host/xhci-hub.c | 29 +++---------
 drivers/usb/host/xhci.c     | 60 +++++++++++++++++++++++++++----------
 drivers/usb/host/xhci.h     |  2 ++
 3 files changed, 50 insertions(+), 41 deletions(-)
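To illustrate the usage described in the commit message, here is a rough sketch of a hypothetical secondary-interrupter client tearing down its session. The function name and the way the xhci/ep handles are obtained are made up for the example; only xhci_stop_endpoint_sync() itself comes from this patch:

/*
 * Illustration only: hypothetical offload client teardown path.  Only
 * xhci_stop_endpoint_sync() is introduced by this patch.
 */
static int example_offload_stop_stream(struct xhci_hcd *xhci,
				       struct xhci_virt_ep *ep)
{
	int ret;

	/* Stop the endpoint and wait for the command to complete; the stop
	 * endpoint command handler also cleans up the transfer ring.
	 */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0);
	if (ret)
		xhci_warn(xhci, "failed to stop ep %u: %d\n",
			  ep->ep_index, ret);

	return ret;
}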
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0054d02239e2..2f7309bdc922 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -489,7 +489,6 @@ EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
 static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 {
 	struct xhci_virt_device *virt_dev;
-	struct xhci_command *cmd;
 	unsigned long flags;
 	int ret;
 	int i;
@@ -501,10 +500,6 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 
 	trace_xhci_stop_device(virt_dev);
 
-	cmd = xhci_alloc_command(xhci, true, GFP_NOIO);
-	if (!cmd)
-		return -ENOMEM;
-
 	spin_lock_irqsave(&xhci->lock, flags);
 	for (i = LAST_EP_INDEX; i > 0; i--) {
 		if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
@@ -521,7 +516,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 			if (!command) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				ret = -ENOMEM;
-				goto cmd_cleanup;
+				goto out;
 			}
 
 			ret = xhci_queue_stop_endpoint(xhci, command, slot_id,
@@ -529,30 +524,14 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
 			if (ret) {
 				spin_unlock_irqrestore(&xhci->lock, flags);
 				xhci_free_command(xhci, command);
-				goto cmd_cleanup;
+				goto out;
 			}
 		}
 	}
-	ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend);
-	if (ret) {
-		spin_unlock_irqrestore(&xhci->lock, flags);
-		goto cmd_cleanup;
-	}
-
-	xhci_ring_cmd_db(xhci);
 	spin_unlock_irqrestore(&xhci->lock, flags);
+	ret = xhci_stop_endpoint_sync(xhci, &virt_dev->eps[0], suspend);
I didn't take this new xhci_stop_endpoint_sync() helper into use as it causes an extra xhci spinlock release and reacquire here.
Also, the memory allocation flags differ: GFP_NOIO is turned into GFP_KERNEL after this change.
Thanks for the review. I agree with the points made. I wasn't sure whether the extra unlock/lock would cause issues once we've already queued the stop endpoint command for the other endpoints used by the device.
Addressing the flags should be straightforward; we can just pass them in as an argument. For this change in particular, is the concern that another xHCI command could be queued before the stop endpoint command is?
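Roughly what I had in mind for the flags, sketch only (the extra gfp_t parameter is not part of this patch; the body otherwise matches the helper added further down in this patch):

int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
			    int suspend, gfp_t gfp_flags)
{
	struct xhci_command *command;
	unsigned long flags;
	int ret;

	/* Caller picks the allocation context instead of it being hard-coded */
	command = xhci_alloc_command(xhci, true, gfp_flags);
	if (!command)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
				       ep->ep_index, suspend);
	if (ret < 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto out;
	}

	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!wait_for_completion_timeout(command->completion,
					 msecs_to_jiffies(3000)))
		xhci_warn(xhci, "%s: Unable to stop endpoint.\n", __func__);

	if (command->status == COMP_COMMAND_ABORTED ||
	    command->status == COMP_COMMAND_RING_STOPPED) {
		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
		ret = -ETIME;
	}
out:
	xhci_free_command(xhci, command);
	return ret;
}

xhci_stop_device() could then keep its original allocation behavior:

	ret = xhci_stop_endpoint_sync(xhci, &virt_dev->eps[0], suspend, GFP_NOIO);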
-
-	/* Wait for last stop endpoint command to finish */
-	wait_for_completion(cmd->completion);
-
-	if (cmd->status == COMP_COMMAND_ABORTED ||
-	    cmd->status == COMP_COMMAND_RING_STOPPED) {
-		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
-		ret = -ETIME;
-	}
-
-cmd_cleanup:
-	xhci_free_command(xhci, cmd);
+out:
 	return ret;
 }
 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 3fd2b58ee1d3..163d533d6200 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -2758,6 +2758,46 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
 	return -ENOMEM;
 }
 
+/*
+ * Synchronous XHCI stop endpoint helper.  Issues the stop endpoint command and
+ * waits for the command completion before returning.
+ */
+int xhci_stop_endpoint_sync(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, int suspend)
+{
+	struct xhci_command *command;
+	unsigned long flags;
+	int ret;
+
+	command = xhci_alloc_command(xhci, true, GFP_KERNEL);
+	if (!command)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+	ret = xhci_queue_stop_endpoint(xhci, command, ep->vdev->slot_id,
+				       ep->ep_index, suspend);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		goto out;
+	}
+
+	xhci_ring_cmd_db(xhci);
+	spin_unlock_irqrestore(&xhci->lock, flags);
+
+	ret = wait_for_completion_timeout(command->completion, msecs_to_jiffies(3000));
+	if (!ret)
+		xhci_warn(xhci, "%s: Unable to stop endpoint.\n",
+			  __func__);
+
+	if (command->status == COMP_COMMAND_ABORTED ||
+	    command->status == COMP_COMMAND_RING_STOPPED) {
+		xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
+		ret = -ETIME;
+	}
+out:
+	xhci_free_command(xhci, command);
+
+	return ret;
+}
 
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
@@ -3078,7 +3118,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	struct xhci_virt_device *vdev;
 	struct xhci_virt_ep *ep;
 	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_command *stop_cmd, *cfg_cmd;
+	struct xhci_command *cfg_cmd;
 	unsigned int ep_index;
 	unsigned long flags;
 	u32 ep_flag;
@@ -3118,10 +3158,6 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (ep_flag == SLOT_FLAG || ep_flag == EP0_FLAG)
 		return;
 
-	stop_cmd = xhci_alloc_command(xhci, true, GFP_NOWAIT);
-	if (!stop_cmd)
-		return;
-
 	cfg_cmd = xhci_alloc_command_with_ctx(xhci, true, GFP_NOWAIT);
 	if (!cfg_cmd)
 		goto cleanup;
@@ -3144,23 +3180,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 		goto cleanup;
 	}
 
-	err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
-				       ep_index, 0);
+	spin_unlock_irqrestore(&xhci->lock, flags);
Same here, extra unlock -> lock, and GFP flags differ.
My thinking here (GFP flags aside) was that the locking is mainly there to set the EP state flag, EP_SOFT_CLEAR_TOGGLE. Once that flag is set, new TDs can no longer be queued, which seems to be why there is a check like this afterwards:
if (!list_empty(&ep->ring->td_list)) {
So I believed releasing the lock here would be OK, because by that point the flag is already set and nothing can be added to the td_list.
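i.e. roughly this ordering (simplified; the label and error handling are placeholders, and this assumes xhci_urb_enqueue() keeps rejecting URBs while EP_SOFT_CLEAR_TOGGLE is set):

	spin_lock_irqsave(&xhci->lock, flags);

	/* Once this flag is set, new URB submissions to the endpoint are
	 * rejected, so td_list cannot grow behind our back.
	 */
	ep->ep_state |= EP_SOFT_CLEAR_TOGGLE;
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Sleeping here should be safe: the flag keeps td_list stable */
	ret = xhci_stop_endpoint_sync(xhci, ep, 0);

	spin_lock_irqsave(&xhci->lock, flags);
	/* The emptiness check is still meaningful despite the lock drop */
	if (!list_empty(&ep->ring->td_list))
		goto cleanup;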
Thanks
Wesley Cheng