+static irqreturn_t qcom_swrm_wake_irq_handler(int irq, void *dev_id)
+{
- struct qcom_swrm_ctrl *swrm = dev_id;
- int ret = IRQ_HANDLED;
- struct sdw_slave *slave;
- clk_prepare_enable(swrm->hclk);
- if (swrm->wake_irq > 0) {
if (!irqd_irq_disabled(irq_get_irq_data(swrm->wake_irq)))
disable_irq_nosync(swrm->wake_irq);
- }
- /*
* resume all the slaves which must have potentially generated this
* interrupt, this should also wake the controller at the same time.
* this is much safer than waking controller directly that will deadlock!
*/
There should be no functional difference between first resuming the controller and then the attached peripherals, and looping over the peripherals while relying on the pm_runtime framework to resume the parent controller for you. The claim that waking the controller directly would deadlock is surprising — please elaborate on the exact scenario, either here in a comment or in the commit message.
- list_for_each_entry(slave, &swrm->bus.slaves, node) {
ret = pm_runtime_get_sync(&slave->dev);
In my experience, you don't want to blur layers by taking runtime-PM references on the child devices from the parent device; I've lost count of how many times that has ended in weird behavior. We've done something similar on the Intel side, but implemented it in a less directive manner — an asynchronous resume request instead of a synchronous get:
	ret = device_for_each_child(bus->dev, NULL, intel_resume_child_device);

static int intel_resume_child_device(struct device *dev, void *data)
{
	[...]
	ret = pm_request_resume(dev);
	if (ret < 0)
		dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);

	return ret;
}
if (ret < 0 && ret != -EACCES) {
dev_err_ratelimited(swrm->dev,
"pm_runtime_get_sync failed in %s, ret %d\n",
__func__, ret);
pm_runtime_put_noidle(&slave->dev);
ret = IRQ_NONE;
goto err;
}
- }
- list_for_each_entry(slave, &swrm->bus.slaves, node) {
pm_runtime_mark_last_busy(&slave->dev);
pm_runtime_put_autosuspend(&slave->dev);
- }
+err:
- clk_disable_unprepare(swrm->hclk);
- return IRQ_HANDLED;
+}