[alsa-devel] [RFC v2 3/3] ALSA: hda: add hda controller to hda

Vinod Koul vinod.koul at intel.com
Wed Mar 25 09:01:41 CET 2015


On Wed, Mar 25, 2015 at 08:05:21AM +0100, Takashi Iwai wrote:
> At Wed, 25 Mar 2015 11:31:39 +0530,
> Vinod Koul wrote:
> > 
> > From: Jeeja KP <jeeja.kp at intel.com>
> > 
> > This adds hdac_controller into hda core.
> > 
> > Signed-off-by: Jeeja KP <jeeja.kp at intel.com>
> > Signed-off-by: Vinod Koul <vinod.koul at intel.com>
> 
> Now the question is whether we need all of this code.
> For example, the bus reset isn't needed for Intel chips; it's a
> workaround for old AMD chips.  Likewise, the polling and single_cmd
> fallbacks are there for buggy chips, and I don't think we'd need them
> for SKL.
I don't have a reason to think this is required.
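For what it's worth, I would expect the new bus_reset op to stay optional,
i.e. left unset for Intel and only wired up by the legacy driver. This is
roughly how I picture a caller using it (a sketch only, not code from the
series):

	/* sketch: reset only when a driver actually provides the op */
	if (bus->allow_bus_reset && bus->ops->bus_reset)
		bus->ops->bus_reset(bus);
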
> 
> The rirb_error flag can be removed and converted to normal error code
> propagation with the new code.  It's there just because the old
> interface didn't give an error code in the get_response op.  And, of
> course, there is the superfluous DSP loader for CA0132...
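On the rirb_error part, just to check my understanding: with the new
get_response op carrying a *res argument, the controller side would simply
return an error code instead of flagging rirb_error, roughly like the sketch
below (the wait helper is only a placeholder, this is not code from your
branch):

static int hda_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
				 unsigned int *res)
{
	struct hda *chip = bus->private_data;

	/* stand-in for the RIRB wait/poll loop */
	if (wait_for_rirb_response(chip, addr) < 0)
		return -EIO;	/* propagated instead of bus->rirb_error = 1 */

	*res = chip->rirb.res[addr];
	return 0;
}
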
Okay, but the DSP loader code would be brought in for us. We need to use
that to load code for the DSP. I was planning to send that out after this
series.

> You can find some code I wrote a couple of weeks ago in the
> test/hda-dev2 branch of the sound-unstable git tree.  I rebased it on
> top of the hda-regmap branch now, so it's mostly clean.  There you can
> find the subset of the HDA controller code that should be enough for a
> clean controller driver implementation.
> 
> I don't mean that mine is necessarily the best.  But my point is that
> we don't have to copy the whole thing into sound/hda.  Rather, start
> from a smaller, mandatory subset for the new driver, then integrate
> this into the existing legacy driver.

Okay, that sounds fine to me. The current code has a lot of legacy in it, so
deciding which bits are required and which are not will take us a while!
Thanks for pointing them out. I will take the code from the unstable tree on
top of this series and see what else we need to do.

One question I have is about the naming convention to adopt: do we need
hdac_xxx everywhere? I thought hdac stands for HDA codec.

-- 
~Vinod
> 
> 
> thanks,
> 
> Takashi
> 
> > ---
> >  include/sound/hdaudio.h     |  215 ++++++++
> >  sound/hda/Makefile          |    2 +-
> >  sound/hda/hdac_controller.c | 1284 +++++++++++++++++++++++++++++++++++++++++++
> >  3 files changed, 1500 insertions(+), 1 deletion(-)
> >  create mode 100644 sound/hda/hdac_controller.c
> > 
> > diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
> > index 675614dc2b88..2843471efc7a 100644
> > --- a/include/sound/hdaudio.h
> > +++ b/include/sound/hdaudio.h
> > @@ -5,8 +5,11 @@
> >  #ifndef __SOUND_HDAUDIO_H
> >  #define __SOUND_HDAUDIO_H
> >  
> > +#include <linux/timecounter.h>
> > +#include <sound/core.h>
> >  #include <linux/device.h>
> >  #include <sound/hda_verbs.h>
> > +#include <sound/pcm.h>
> >  
> >  /* codec node id */
> >  typedef u16 hda_nid_t;
> > @@ -15,6 +18,7 @@ struct hdac_bus;
> >  struct hdac_device;
> >  struct hdac_driver;
> >  struct hdac_widget_tree;
> > +struct hda;
> >  
> >  /*
> >   * exported bus type
> > @@ -125,6 +129,8 @@ struct hdac_bus_ops {
> >  	/* get a response from the last command */
> >  	int (*get_response)(struct hdac_bus *bus, unsigned int addr,
> >  			    unsigned int *res);
> > +	/* reset bus for retry verb */
> > +	void (*bus_reset)(struct hdac_bus *bus);
> >  };
> >  
> >  #define HDA_UNSOL_QUEUE_SIZE	64
> > @@ -144,6 +150,7 @@ struct hdac_bus {
> >  	u32 unsol_queue[HDA_UNSOL_QUEUE_SIZE * 2]; /* ring buffer */
> >  	unsigned int unsol_rp, unsol_wp;
> >  	struct work_struct unsol_work;
> > +	struct workqueue_struct *workq; /* common workqueue for codecs */
> >  
> >  	/* bit flags of powered codecs */
> >  	unsigned long codec_powered;
> > @@ -153,6 +160,17 @@ struct hdac_bus {
> >  
> >  	/* locks */
> >  	struct mutex cmd_mutex;
> > +
> > +	void *private_data;
> > +
> > +	/* misc op flags */
> > +	unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */
> > +
> > +	/* status for controller */
> > +	unsigned int rirb_error:1;      /* error in codec communication */
> > +	unsigned int response_reset:1;  /* controller was reset */
> > +	unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
> > +	unsigned int in_reset:1;        /* during reset operation */
> >  };
> >  
> >  int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
> > @@ -178,4 +196,201 @@ static inline void snd_hdac_codec_link_down(struct hdac_device *codec)
> >  	clear_bit(codec->addr, &codec->bus->codec_powered);
> >  }
> >  
> > +/*
> > + * HD-audio controller base device
> > + */
> > +struct hda_device {
> > +	struct snd_dma_buffer bdl;	/* BDL buffer */
> > +	u32 *posbuf;			/* position buffer pointer */
> > +
> > +	unsigned int bufsize;		/* size of the play buffer in bytes */
> > +	unsigned int period_bytes;	 /* size of the period in bytes */
> > +	unsigned int frags;		/* number of periods in the play buffer */
> > +	unsigned int fifo_size;		/* FIFO size */
> > +	unsigned long start_wallclk;	/* start + minimum wallclk */
> > +	unsigned long period_wallclk;	/* wallclk for period */
> > +
> > +	void __iomem *sd_addr;		/* stream descriptor pointer */
> > +
> > +	u32 sd_int_sta_mask;		/* stream int status mask */
> > +
> > +	/* pcm support */
> > +	struct snd_pcm_substream *substream;	/* assigned substream,
> > +						 * set in PCM open
> > +						 */
> > +	unsigned int format_val;	/* format value to be set in the
> > +					 * controller and the codec
> > +					 */
> > +	unsigned char stream_tag;	/* assigned stream */
> > +	unsigned char index;		/* stream index */
> > +	int assigned_key;		/* last device# key assigned to */
> > +
> > +	unsigned int opened:1;
> > +	unsigned int running:1;
> > +	unsigned int irq_pending:1;
> > +	unsigned int prepared:1;
> > +	unsigned int locked:1;
> > +	unsigned int wc_marked:1;
> > +	unsigned int no_period_wakeup:1;
> > +
> > +	struct timecounter  tc;
> > +	struct cyclecounter cc;
> > +
> > +	int delay_negative_threshold;
> > +
> > +	/* Allows dsp load to have sole access to the playback stream. */
> > +	struct mutex dsp_mutex;
> > +};
> > +
> > +/* CORB/RIRB */
> > +struct hda_rb {
> > +	u32 *buf;			/* CORB/RIRB buffer
> > +					 * Each CORB entry is 4 bytes, each RIRB entry is 8 bytes
> > +					 */
> > +	dma_addr_t addr;		/* physical address of CORB/RIRB buffer */
> > +
> > +	/* for RIRB */
> > +	unsigned short rp, wp;		/* read/write pointers */
> > +	int cmds[HDA_MAX_CODECS];	/* number of pending requests */
> > +	u32 res[HDA_MAX_CODECS];	/* last read value */
> > +};
> > +
> > +/* Functions to read/write to hda registers. */
> > +/* FIXME: should we name this something else ??? */
> > +struct hda_ops {
> > +	void (*reg_writel)(u32 value, u32 __iomem *addr);
> > +	u32 (*reg_readl)(u32 __iomem *addr);
> > +	void (*reg_writew)(u16 value, u16 __iomem *addr);
> > +	u16 (*reg_readw)(u16 __iomem *addr);
> > +	void (*reg_writeb)(u8 value, u8 __iomem *addr);
> > +	u8 (*reg_readb)(u8 __iomem *addr);
> > +	/* Disable msi if supported, PCI only */
> > +	int (*disable_msi_reset_irq)(struct hda *);
> > +	/* Allocation ops */
> > +	int (*dma_alloc_pages)(struct hda *chip,
> > +			       int type,
> > +			       size_t size,
> > +			       struct snd_dma_buffer *buf);
> > +	void (*dma_free_pages)(struct hda *chip, struct snd_dma_buffer *buf);
> > +	int (*substream_alloc_pages)(struct hda *chip,
> > +				     struct snd_pcm_substream *substream,
> > +				     size_t size);
> > +	int (*substream_free_pages)(struct hda *chip,
> > +				    struct snd_pcm_substream *substream);
> > +	void (*pcm_mmap_prepare)(struct snd_pcm_substream *substream,
> > +				 struct vm_area_struct *area);
> > +	/* Check if current position is acceptable */
> > +	int (*position_check)(struct hda *chip, struct hda_device *hda_dev);
> > +};
> > +
> > +typedef unsigned int (*hda_get_pos_callback_t)(struct hda *, struct hda_device *);
> > +typedef int (*hda_get_delay_callback_t)(struct hda *, struct hda_device *,
> > +			 unsigned int pos);
> > +
> > +struct hda {
> > +	struct pci_dev *pci; /* FIXME: should we remove the PCI assumption? Right now it's always true for us */
> > +	struct device *dev;
> > +	int dev_index;
> > +
> > +	/* chip type specific */
> > +	int driver_type;
> > +	unsigned int driver_caps;
> > +	int playback_streams;
> > +	int playback_index_offset;
> > +	int capture_streams;
> > +	int capture_index_offset;
> > +	int num_streams;
> > +
> > +	/* Register interaction. */
> > +	const struct hda_ops *ops;
> > +
> > +	/* position adjustment callbacks */
> > +	hda_get_pos_callback_t get_position[2];
> > +	hda_get_delay_callback_t get_delay[2];
> > +
> > +	/* pci resources */
> > +	unsigned long addr;
> > +	void __iomem *remap_addr;
> > +	int irq;
> > +
> > +	/* locks */
> > +	spinlock_t reg_lock;
> > +	struct mutex open_mutex; /* Prevents concurrent open/close operations */
> > +	struct completion probe_wait;
> > +
> > +	/* streams (x num_streams) */
> > +	struct hda_device *hda_dev;
> > +
> > +	/* HD codec */
> > +	unsigned short codec_mask;
> > +	int  codec_probe_mask; /* copied from probe_mask option */
> > +	struct hdac_bus *bus;
> > +	unsigned int beep_mode;
> > +
> > +	/* CORB/RIRB */
> > +	struct hda_rb corb;
> > +	struct hda_rb rirb;
> > +
> > +	/* CORB/RIRB and position buffers */
> > +	struct snd_dma_buffer rb;
> > +	struct snd_dma_buffer posbuf;
> > +
> > +	/* flags */
> > +	const int *bdl_pos_adj;
> > +	int poll_count;
> > +	unsigned int running:1;
> > +	unsigned int initialized:1;
> > +	unsigned int single_cmd:1;
> > +	unsigned int polling_mode:1;
> > +	unsigned int msi:1;
> > +	unsigned int probing:1; /* codec probing phase */
> > +	unsigned int snoop:1;
> > +	unsigned int align_buffer_size:1;
> > +	unsigned int region_requested:1;
> > +	unsigned int disabled:1; /* disabled by VGA-switcher */
> > +
> > +	/* for debugging */
> > +	unsigned int last_cmd[HDA_MAX_CODECS];
> > +
> > +	/* reboot notifier (for mysterious hangup problem at power-down) */
> > +	struct notifier_block reboot_notifier;
> > +
> > +	struct hda_device saved_hda_dev; /* FIXME: check if we need this */
> > +};
> > +
> > +/* Allocation functions. */
> > +int hda_alloc_stream_pages(struct hda *chip);
> > +void hda_free_stream_pages(struct hda *chip);
> > +
> > +/* pcm helper functions */
> > +int hda_setup_periods(struct hda *chip,
> > +			struct snd_pcm_substream *substream,
> > +			struct hda_device *dev);
> > +void hda_reset_device(struct hda *chip, struct hda_device *hda_dev);
> > +int hda_set_device_params(struct hda *chip, struct snd_pcm_substream *substream,
> > +				unsigned int format_val);
> > +void hda_set_pcm_constrains(struct hda *chip, struct snd_pcm_runtime *runtime);
> > +
> > +/* PCM setup */
> > +static inline struct hda_device *get_hda_dev(struct snd_pcm_substream *substream)
> > +{
> > +	return substream->runtime->private_data;
> > +}
> > +unsigned int hda_get_position(struct hda *chip, struct hda_device *hda_dev,
> > +		int codec_delay);
> > +unsigned int hda_get_pos_lpib(struct hda *chip, struct hda_device *hda_dev);
> > +unsigned int hda_get_pos_posbuf(struct hda *chip, struct hda_device *hda_dev);
> > +
> > +/* Stream control. */
> > +void hda_stream_stop(struct hda *chip, struct hda_device *hda_dev);
> > +
> > +/* Allocation functions. */
> > +int hda_alloc_stream_pages(struct hda *chip);
> > +void hda_free_stream_pages(struct hda *chip);
> > +
> > +/* Low level azx interface */
> > +void hda_init_chip(struct hda *chip, bool full_reset);
> > +void hda_stop_chip(struct hda *chip);
> > +void hda_enter_link_reset(struct hda *chip);
> > +
> >  #endif /* __SOUND_HDAUDIO_H */
> > diff --git a/sound/hda/Makefile b/sound/hda/Makefile
> > index eec5da03b41f..836c5f34b4dd 100644
> > --- a/sound/hda/Makefile
> > +++ b/sound/hda/Makefile
> > @@ -1,4 +1,4 @@
> > -snd-hda-core-objs := hda_bus_type.o hdac_bus.o hdac_device.o hdac_sysfs.o
> > +snd-hda-core-objs := hda_bus_type.o hdac_bus.o hdac_device.o hdac_sysfs.o hdac_controller.o
> >  
> >  snd-hda-core-objs += trace.o
> >  CFLAGS_trace.o := -I$(src)
> > diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
> > new file mode 100644
> > index 000000000000..3b14f7be43da
> > --- /dev/null
> > +++ b/sound/hda/hdac_controller.c
> > @@ -0,0 +1,1284 @@
> > +/*
> > + *
> > + *  Implementation of Common HDA driver functions for Intel HD Audio.
> > + *
> > + *  Copyright (c) 2014 Intel Corporation
> > + *  Copyright(c) 2004 Intel Corporation. All rights reserved.
> > + *
> > + *  Copyright (c) 2004 Takashi Iwai <tiwai at suse.de>
> > + *                     PeiSen Hou <pshou at realtek.com.tw>
> > + *
> > + *  Modified by: KP Jeeja <jeeja.kp at intel.com>
> > + *
> > + *  This program is free software; you can redistribute it and/or modify it
> > + *  under the terms of the GNU General Public License as published by the Free
> > + *  Software Foundation; either version 2 of the License, or (at your option)
> > + *  any later version.
> > + *
> > + *  This program is distributed in the hope that it will be useful, but WITHOUT
> > + *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> > + *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> > + *  more details.
> > + *
> > + *
> > + */
> > +
> > +#include <linux/clocksource.h>
> > +#include <linux/delay.h>
> > +#include <linux/interrupt.h>
> > +#include <linux/kernel.h>
> > +#include <linux/module.h>
> > +#include <linux/pm_runtime.h>
> > +#include <linux/slab.h>
> > +#include <linux/reboot.h>
> > +#include <sound/core.h>
> > +#include <sound/initval.h>
> > +#include <sound/hdaudio.h>
> > +#include <sound/hda_registers.h>
> > +
> > +/* DSP lock helpers */
> > +#ifdef CONFIG_SND_HDA_DSP_LOADER
> > +#define dsp_lock_init(dev)	mutex_init(&(dev)->dsp_mutex)
> > +#define dsp_lock(dev)		mutex_lock(&(dev)->dsp_mutex)
> > +#define dsp_unlock(dev)		mutex_unlock(&(dev)->dsp_mutex)
> > +#define dsp_is_locked(dev)	((dev)->locked)
> > +#else
> > +#define dsp_lock_init(dev)	do {} while (0)
> > +#define dsp_lock(dev)		do {} while (0)
> > +#define dsp_unlock(dev)		do {} while (0)
> > +#define dsp_is_locked(dev)	0
> > +#endif
> > +
> > +/*
> > + * AZX stream operations.
> > + */
> > +
> > +/* start a stream */
> > +void hda_stream_start(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	/* enable SIE */
> > +	azx_writel(chip, INTCTL,
> > +		   azx_readl(chip, INTCTL) | (1 << azx_dev->index));
> > +	/* set DMA start and interrupt mask */
> > +	azx_sd_writeb(chip, azx_dev, SD_CTL,
> > +		      azx_sd_readb(chip, azx_dev, SD_CTL) |
> > +		      SD_CTL_DMA_START | SD_INT_MASK);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_stream_start);
> > +/* stop DMA */
> > +static void hda_stream_clear(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	azx_sd_writeb(chip, azx_dev, SD_CTL,
> > +		      azx_sd_readb(chip, azx_dev, SD_CTL) &
> > +		      ~(SD_CTL_DMA_START | SD_INT_MASK));
> > +	azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
> > +}
> > +
> > +/* stop a stream */
> > +void hda_stream_stop(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	hda_stream_clear(chip, azx_dev);
> > +	/* disable SIE */
> > +	azx_writel(chip, INTCTL,
> > +		   azx_readl(chip, INTCTL) & ~(1 << azx_dev->index));
> > +}
> > +EXPORT_SYMBOL_GPL(hda_stream_stop);
> > +
> > +/* reset stream */
> > +void hda_stream_reset(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	unsigned char val;
> > +	int timeout;
> > +
> > +	hda_stream_clear(chip, azx_dev);
> > +
> > +	azx_sd_writeb(chip, azx_dev, SD_CTL,
> > +		      azx_sd_readb(chip, azx_dev, SD_CTL) |
> > +		      SD_CTL_STREAM_RESET);
> > +	udelay(3);
> > +	timeout = 300;
> > +	while (!((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
> > +		 SD_CTL_STREAM_RESET) && --timeout)
> > +		;
> > +	val &= ~SD_CTL_STREAM_RESET;
> > +	azx_sd_writeb(chip, azx_dev, SD_CTL, val);
> > +	udelay(3);
> > +
> > +	timeout = 300;
> > +	/* waiting for hardware to report that the stream is out of reset */
> > +	while (((val = azx_sd_readb(chip, azx_dev, SD_CTL)) &
> > +		SD_CTL_STREAM_RESET) && --timeout)
> > +		;
> > +
> > +	/* reset first position - may not be synced with hw at this time */
> > +	*azx_dev->posbuf = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_stream_reset);
> > +
> > +/*
> > + * set up the SD for streaming
> > + */
> > +int hda_setup_controller(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	unsigned int val;
> > +	/* make sure the run bit is zero for SD */
> > +	hda_stream_clear(chip, azx_dev);
> > +	/* program the stream_tag */
> > +	val = azx_sd_readl(chip, azx_dev, SD_CTL);
> > +	val = (val & ~SD_CTL_STREAM_TAG_MASK) |
> > +		(azx_dev->stream_tag << SD_CTL_STREAM_TAG_SHIFT);
> > +	azx_sd_writel(chip, azx_dev, SD_CTL, val);
> > +
> > +	/* program the length of samples in cyclic buffer */
> > +	azx_sd_writel(chip, azx_dev, SD_CBL, azx_dev->bufsize);
> > +
> > +	/* program the stream format */
> > +	/* this value needs to be the same as the one programmed to the codec */
> > +	azx_sd_writew(chip, azx_dev, SD_FORMAT, azx_dev->format_val);
> > +
> > +	/* program the stream LVI (last valid index) of the BDL */
> > +	azx_sd_writew(chip, azx_dev, SD_LVI, azx_dev->frags - 1);
> > +
> > +	/* program the BDL address */
> > +	/* lower BDL address */
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPL, (u32)azx_dev->bdl.addr);
> > +	/* upper BDL address */
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPU,
> > +		      upper_32_bits(azx_dev->bdl.addr));
> > +
> > +	/* enable the position buffer */
> > +	if (chip->get_position[0] != hda_get_pos_lpib ||
> > +	    chip->get_position[1] != hda_get_pos_lpib) {
> > +		if (!(azx_readl(chip, DPLBASE) & AZX_DPLBASE_ENABLE))
> > +			azx_writel(chip, DPLBASE,
> > +				(u32)chip->posbuf.addr | AZX_DPLBASE_ENABLE);
> > +	}
> > +
> > +	/* set the interrupt enable bits in the descriptor control register */
> > +	azx_sd_writel(chip, azx_dev, SD_CTL,
> > +		      azx_sd_readl(chip, azx_dev, SD_CTL) | SD_INT_MASK);
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_setup_controller);
> > +
> > +/* assign a stream for the PCM */
> > +struct hda_device *
> > +hda_assign_device(struct hda *chip, struct snd_pcm_substream *substream)
> > +{
> > +	int dev, i, nums;
> > +	struct hda_device *res = NULL;
> > +	/* make a non-zero unique key for the substream */
> > +	int key = (substream->pcm->device << 16) | (substream->number << 2) |
> > +		(substream->stream + 1);
> > +
> > +	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
> > +		dev = chip->playback_index_offset;
> > +		nums = chip->playback_streams;
> > +	} else {
> > +		dev = chip->capture_index_offset;
> > +		nums = chip->capture_streams;
> > +	}
> > +	for (i = 0; i < nums; i++, dev++) {
> > +		struct hda_device *azx_dev = &chip->hda_dev[dev];
> > +
> > +		dsp_lock(azx_dev);
> > +		if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
> > +			if (azx_dev->assigned_key == key) {
> > +				azx_dev->opened = 1;
> > +				azx_dev->assigned_key = key;
> > +				dsp_unlock(azx_dev);
> > +				return azx_dev;
> > +			}
> > +			if (!res ||
> > +			    (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
> > +				res = azx_dev;
> > +		}
> > +		dsp_unlock(azx_dev);
> > +	}
> > +	if (res) {
> > +		dsp_lock(res);
> > +		res->opened = 1;
> > +		res->assigned_key = key;
> > +		dsp_unlock(res);
> > +	}
> > +	return res;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_assign_device);
> > +
> > +/* release the assigned stream */
> > +void hda_release_device(struct hda_device *hda_dev)
> > +{
> > +	hda_dev->opened = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_release_device);
> > +
> > +/*
> > + * set up a BDL entry
> > + */
> > +int hda_setup_bdle(struct hda *chip,
> > +		      struct snd_dma_buffer *dmab,
> > +		      struct hda_device *azx_dev, u32 **bdlp,
> > +		      int ofs, int size, int with_ioc)
> > +{
> > +	u32 *bdl = *bdlp;
> > +
> > +	while (size > 0) {
> > +		dma_addr_t addr;
> > +		int chunk;
> > +
> > +		if (azx_dev->frags >= AZX_MAX_BDL_ENTRIES)
> > +			return -EINVAL;
> > +
> > +		addr = snd_sgbuf_get_addr(dmab, ofs);
> > +		dev_dbg(chip->dev, "buffer address=%#llx\n", (u64)addr);
> > +		/* program the address field of the BDL entry */
> > +		bdl[0] = cpu_to_le32((u32)addr);
> > +		bdl[1] = cpu_to_le32(upper_32_bits(addr));
> > +		/* program the size field of the BDL entry */
> > +		chunk = snd_sgbuf_get_chunk_size(dmab, ofs, size);
> > +		/* one BDLE cannot cross 4K boundary on CTHDA chips */
> > +		if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY) {
> > +			u32 remain = 0x1000 - (ofs & 0xfff);
> > +
> > +			if (chunk > remain)
> > +				chunk = remain;
> > +		}
> > +		bdl[2] = cpu_to_le32(chunk);
> > +		/* program the IOC to enable interrupt
> > +		 * only when the whole fragment is processed
> > +		 */
> > +		size -= chunk;
> > +		bdl[3] = (size || !with_ioc) ? 0 : cpu_to_le32(0x01);
> > +		bdl += 4;
> > +		azx_dev->frags++;
> > +		ofs += chunk;
> > +	}
> > +	*bdlp = bdl;
> > +	dev_dbg(chip->dev, "bdl: 0x%p, ofs:0x%x\n", bdl, ofs);
> > +	return ofs;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_setup_bdle);
> > +
> > +/*
> > + * set up BDL entries
> > + */
> > +int hda_setup_periods(struct hda *chip,
> > +			     struct snd_pcm_substream *substream,
> > +			     struct hda_device *azx_dev)
> > +{
> > +	u32 *bdl;
> > +	int i, ofs, periods, period_bytes;
> > +	int pos_adj = 0;
> > +
> > +	/* reset BDL address */
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
> > +
> > +	period_bytes = azx_dev->period_bytes;
> > +	periods = azx_dev->bufsize / period_bytes;
> > +
> > +	/* program the initial BDL entries */
> > +	bdl = (u32 *)azx_dev->bdl.area;
> > +	ofs = 0;
> > +	azx_dev->frags = 0;
> > +
> > +	if (chip->bdl_pos_adj)
> > +		pos_adj = chip->bdl_pos_adj[chip->dev_index];
> > +	if (!azx_dev->no_period_wakeup && pos_adj > 0) {
> > +		struct snd_pcm_runtime *runtime = substream->runtime;
> > +		int pos_align = pos_adj;
> > +
> > +		pos_adj = (pos_adj * runtime->rate + 47999) / 48000;
> > +		if (!pos_adj)
> > +			pos_adj = pos_align;
> > +		else
> > +			pos_adj = ((pos_adj + pos_align - 1) / pos_align) *
> > +				pos_align;
> > +		pos_adj = frames_to_bytes(runtime, pos_adj);
> > +		if (pos_adj >= period_bytes) {
> > +			dev_warn(chip->dev, "Too big adjustment %d\n",
> > +				 pos_adj);
> > +			pos_adj = 0;
> > +		} else {
> > +			ofs = hda_setup_bdle(chip, snd_pcm_get_dma_buf(substream),
> > +					 azx_dev,
> > +					 &bdl, ofs, pos_adj, true);
> > +			if (ofs < 0)
> > +				goto error;
> > +		}
> > +	} else
> > +		pos_adj = 0;
> > +
> > +	for (i = 0; i < periods; i++) {
> > +		if (i == periods - 1 && pos_adj)
> > +			ofs = hda_setup_bdle(chip, snd_pcm_get_dma_buf(substream),
> > +					 azx_dev, &bdl, ofs,
> > +					 period_bytes - pos_adj, 0);
> > +		else
> > +			ofs = hda_setup_bdle(chip, snd_pcm_get_dma_buf(substream),
> > +					 azx_dev, &bdl, ofs,
> > +					 period_bytes,
> > +					 !azx_dev->no_period_wakeup);
> > +		if (ofs < 0)
> > +			goto error;
> > +	}
> > +	return 0;
> > +
> > + error:
> > +	dev_err(chip->dev, "Too many BDL entries: buffer=%d, period=%d\n",
> > +		azx_dev->bufsize, period_bytes);
> > +	return -EINVAL;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_setup_periods);
> > +
> > +unsigned int hda_get_pos_lpib(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	return azx_sd_readl(chip, azx_dev, SD_LPIB);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_get_pos_lpib);
> > +
> > +unsigned int hda_get_pos_posbuf(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	return le32_to_cpu(*azx_dev->posbuf);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_get_pos_posbuf);
> > +
> > +unsigned int hda_get_position(struct hda *chip,
> > +			      struct hda_device *azx_dev, int codec_delay)
> > +{
> > +	struct snd_pcm_substream *substream = azx_dev->substream;
> > +	unsigned int pos;
> > +	int stream = substream->stream;
> > +	int delay = 0;
> > +
> > +	if (chip->get_position[stream])
> > +		pos = chip->get_position[stream](chip, azx_dev);
> > +	else /* use the position buffer as default */
> > +		pos = hda_get_pos_posbuf(chip, azx_dev);
> > +
> > +	if (pos >= azx_dev->bufsize)
> > +		pos = 0;
> > +
> > +	if (substream->runtime) {
> > +		if (chip->get_delay[stream])
> > +			delay += chip->get_delay[stream](chip, azx_dev, pos);
> > +		delay += codec_delay;
> > +		substream->runtime->delay = delay;
> > +	}
> > +
> > +	return pos;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_get_position);
> > +
> > +void hda_reset_device(struct hda *chip, struct hda_device *azx_dev)
> > +{
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPL, 0);
> > +	azx_sd_writel(chip, azx_dev, SD_BDLPU, 0);
> > +	azx_sd_writel(chip, azx_dev, SD_CTL, 0);
> > +	azx_dev->bufsize = 0;
> > +	azx_dev->period_bytes = 0;
> > +	azx_dev->format_val = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_reset_device);
> > +
> > +int hda_set_device_params(struct hda *chip, struct snd_pcm_substream *substream,
> > +				unsigned int format_val)
> > +{
> > +
> > +	unsigned int bufsize, period_bytes, stream_tag;
> > +	struct hda_device *azx_dev = get_hda_dev(substream);
> > +	struct snd_pcm_runtime *runtime = substream->runtime;
> > +	int err;
> > +
> > +	bufsize = snd_pcm_lib_buffer_bytes(substream);
> > +	period_bytes = snd_pcm_lib_period_bytes(substream);
> > +
> > +	dev_dbg(chip->dev, "azx_pcm_prepare: bufsize=0x%x, format=0x%x\n",
> > +		bufsize, format_val);
> > +
> > +	if (bufsize != azx_dev->bufsize ||
> > +	    period_bytes != azx_dev->period_bytes ||
> > +	    format_val != azx_dev->format_val ||
> > +	    runtime->no_period_wakeup != azx_dev->no_period_wakeup) {
> > +		azx_dev->bufsize = bufsize;
> > +		azx_dev->period_bytes = period_bytes;
> > +		azx_dev->format_val = format_val;
> > +		azx_dev->no_period_wakeup = runtime->no_period_wakeup;
> > +		err = hda_setup_periods(chip, substream, azx_dev);
> > +		if (err < 0)
> > +			return err;
> > +	}
> > +
> > +	/* when LPIB delay correction gives a small negative value,
> > +	 * we ignore it; currently set the threshold statically to
> > +	 * 64 frames
> > +	 */
> > +	if (runtime->period_size > 64)
> > +		azx_dev->delay_negative_threshold = -frames_to_bytes(runtime, 64);
> > +	else
> > +		azx_dev->delay_negative_threshold = 0;
> > +
> > +	/* wallclk has 24Mhz clock source */
> > +	azx_dev->period_wallclk = (((runtime->period_size * 24000) /
> > +						runtime->rate) * 1000);
> > +	hda_setup_controller(chip, azx_dev);
> > +	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
> > +		azx_dev->fifo_size =
> > +			azx_sd_readw(chip, azx_dev, SD_FIFOSIZE) + 1;
> > +	else
> > +		azx_dev->fifo_size = 0;
> > +
> > +	stream_tag = azx_dev->stream_tag;
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_set_device_params);
> > +
> > +void hda_set_pcm_constrains(struct hda *chip, struct snd_pcm_runtime *runtime)
> > +{
> > +	int buff_step;
> > +
> > +	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
> > +
> > +	/* avoid wrap-around with wall-clock */
> > +	snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME,
> > +				     20,
> > +				     178000000);
> > +
> > +	if (chip->align_buffer_size)
> > +		/* constrain buffer sizes to be multiple of 128
> > +		   bytes. This is more efficient in terms of memory
> > +		   access but isn't required by the HDA spec and
> > +		   prevents users from specifying exact period/buffer
> > +		   sizes. For example for 44.1kHz, a period size set
> > +		   to 20ms will be rounded to 19.59ms. */
> > +		buff_step = 128;
> > +	else
> > +		/* Don't enforce steps on buffer sizes, still need to
> > +		   be multiple of 4 bytes (HDA spec). Tested on Intel
> > +		   HDA controllers, may not work on all devices where
> > +		   option needs to be disabled */
> > +		buff_step = 4;
> > +
> > +	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
> > +				   buff_step);
> > +	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
> > +				   buff_step);
> > +
> > +}
> > +EXPORT_SYMBOL_GPL(hda_set_pcm_constrains);
> > +
> > +/*
> > + * CORB / RIRB interface
> > + */
> > +static int hda_alloc_cmd_io(struct hda *chip)
> > +{
> > +	int err;
> > +
> > +	/* single page (at least 4096 bytes) must suffice for both ring buffers */
> > +	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
> > +					 PAGE_SIZE, &chip->rb);
> > +	if (err < 0)
> > +		dev_err(chip->dev, "cannot allocate CORB/RIRB\n");
> > +	return err;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_alloc_cmd_io);
> > +
> > +static void hda_init_cmd_io(struct hda *chip)
> > +{
> > +	int timeout;
> > +
> > +	spin_lock_irq(&chip->reg_lock);
> > +	/* CORB set up */
> > +	chip->corb.addr = chip->rb.addr;
> > +	chip->corb.buf = (u32 *)chip->rb.area;
> > +	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
> > +	azx_writel(chip, CORBUBASE, upper_32_bits(chip->corb.addr));
> > +
> > +	/* set the corb size to 256 entries (ULI requires this explicitly) */
> > +	azx_writeb(chip, CORBSIZE, 0x02);
> > +	/* set the corb write pointer to 0 */
> > +	azx_writew(chip, CORBWP, 0);
> > +
> > +	/* reset the corb hw read pointer */
> > +	azx_writew(chip, CORBRP, AZX_CORBRP_RST);
> > +	if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
> > +		for (timeout = 1000; timeout > 0; timeout--) {
> > +			if ((azx_readw(chip, CORBRP) & AZX_CORBRP_RST) == AZX_CORBRP_RST)
> > +				break;
> > +			udelay(1);
> > +		}
> > +		if (timeout <= 0)
> > +			dev_err(chip->dev, "CORB reset timeout#1, CORBRP = %d\n",
> > +				azx_readw(chip, CORBRP));
> > +
> > +		azx_writew(chip, CORBRP, 0);
> > +		for (timeout = 1000; timeout > 0; timeout--) {
> > +			if (azx_readw(chip, CORBRP) == 0)
> > +				break;
> > +			udelay(1);
> > +		}
> > +		if (timeout <= 0)
> > +			dev_err(chip->dev, "CORB reset timeout#2, CORBRP = %d\n",
> > +				azx_readw(chip, CORBRP));
> > +	}
> > +
> > +	/* enable corb dma */
> > +	azx_writeb(chip, CORBCTL, AZX_CORBCTL_RUN);
> > +
> > +	/* RIRB set up */
> > +	chip->rirb.addr = chip->rb.addr + 2048;
> > +	chip->rirb.buf = (u32 *)(chip->rb.area + 2048);
> > +	chip->rirb.wp = chip->rirb.rp = 0;
> > +	memset(chip->rirb.cmds, 0, sizeof(chip->rirb.cmds));
> > +	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
> > +	azx_writel(chip, RIRBUBASE, upper_32_bits(chip->rirb.addr));
> > +
> > +	/* set the rirb size to 256 entries (ULI requires this explicitly) */
> > +	azx_writeb(chip, RIRBSIZE, 0x02);
> > +	/* reset the rirb hw write pointer */
> > +	azx_writew(chip, RIRBWP, AZX_RIRBWP_RST);
> > +	/* set N=1, get RIRB response interrupt for new entry */
> > +	if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
> > +		azx_writew(chip, RINTCNT, 0xc0);
> > +	else
> > +		azx_writew(chip, RINTCNT, 1);
> > +	/* enable rirb dma and response irq */
> > +	azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN);
> > +	spin_unlock_irq(&chip->reg_lock);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_init_cmd_io);
> > +
> > +static void hda_free_cmd_io(struct hda *chip)
> > +{
> > +	spin_lock_irq(&chip->reg_lock);
> > +	/* disable ringbuffer DMAs */
> > +	azx_writeb(chip, RIRBCTL, 0);
> > +	azx_writeb(chip, CORBCTL, 0);
> > +	spin_unlock_irq(&chip->reg_lock);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_free_cmd_io);
> > +
> > +static unsigned int hda_command_addr(u32 cmd)
> > +{
> > +	unsigned int addr = cmd >> 28;
> > +
> > +	if (addr >= HDA_MAX_CODECS) {
> > +		snd_BUG();
> > +		addr = 0;
> > +	}
> > +
> > +	return addr;
> > +}
> > +
> > +/* send a command */
> > +static int hda_corb_send_cmd(struct hdac_bus *bus, u32 val)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +	unsigned int addr = hda_command_addr(val);
> > +	unsigned int wp, rp;
> > +
> > +	spin_lock_irq(&chip->reg_lock);
> > +
> > +	/* add command to corb */
> > +	wp = azx_readw(chip, CORBWP);
> > +	if (wp == 0xffff) {
> > +		/* something wrong, controller likely turned to D3 */
> > +		spin_unlock_irq(&chip->reg_lock);
> > +		return -EIO;
> > +	}
> > +	wp++;
> > +	wp %= AZX_MAX_CORB_ENTRIES;
> > +
> > +	rp = azx_readw(chip, CORBRP);
> > +	if (wp == rp) {
> > +		/* oops, it's full */
> > +		spin_unlock_irq(&chip->reg_lock);
> > +		return -EAGAIN;
> > +	}
> > +
> > +	chip->rirb.cmds[addr]++;
> > +	chip->corb.buf[wp] = cpu_to_le32(val);
> > +	azx_writew(chip, CORBWP, wp);
> > +
> > +	spin_unlock_irq(&chip->reg_lock);
> > +
> > +	return 0;
> > +}
> > +
> > +#define AZX_RIRB_EX_UNSOL_EV	(1<<4)
> > +
> > +/**
> > + * hda_queue_unsol_event - add an unsolicited event to queue
> > + * @bus: the BUS
> > + * @res: unsolicited event (lower 32bit of RIRB entry)
> > + * @res_ex: codec addr and flags (upper 32bit of RIRB entry)
> > + *
> > + * Adds the given event to the queue.  The events are processed in
> > + * the workqueue asynchronously.  Call this function in the interrupt
> > + * handler when RIRB receives an unsolicited event.
> > + *
> > + * Returns 0 if successful, or a negative error code.
> > + */
> > +int hda_queue_unsol_event(struct hdac_bus *bus, u32 res, u32 res_ex)
> > +{
> > +	unsigned int wp;
> > +
> > +	if (!bus)
> > +		return 0;
> > +
> > +	wp = (bus->unsol_wp + 1) % HDA_UNSOL_QUEUE_SIZE;
> > +	bus->unsol_wp = wp;
> > +
> > +	wp <<= 1;
> > +	bus->unsol_queue[wp] = res;
> > +	bus->unsol_queue[wp + 1] = res_ex;
> > +
> > +	queue_work(bus->workq, &bus->unsol_work);
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_queue_unsol_event);
> > +
> > +/* retrieve RIRB entry - called from interrupt handler */
> > +static void hda_update_rirb(struct hda *chip)
> > +{
> > +	unsigned int rp, wp;
> > +	unsigned int addr;
> > +	u32 res, res_ex;
> > +
> > +	wp = azx_readw(chip, RIRBWP);
> > +	if (wp == 0xffff) {
> > +		/* something wrong, controller likely turned to D3 */
> > +		return;
> > +	}
> > +
> > +	if (wp == chip->rirb.wp)
> > +		return;
> > +	chip->rirb.wp = wp;
> > +
> > +	while (chip->rirb.rp != wp) {
> > +		chip->rirb.rp++;
> > +		chip->rirb.rp %= AZX_MAX_RIRB_ENTRIES;
> > +
> > +		rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */
> > +		res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]);
> > +		res = le32_to_cpu(chip->rirb.buf[rp]);
> > +		addr = res_ex & 0xf;
> > +		if ((addr >= HDA_MAX_CODECS) || !(chip->codec_mask & (1 << addr))) {
> > +			dev_err(chip->dev, "spurious response %#x:%#x, rp = %d, wp = %d",
> > +				res, res_ex,
> > +				chip->rirb.rp, wp);
> > +			snd_BUG();
> > +		} else if (res_ex & AZX_RIRB_EX_UNSOL_EV)
> > +			hda_queue_unsol_event(chip->bus, res, res_ex);
> > +		else if (chip->rirb.cmds[addr]) {
> > +			chip->rirb.res[addr] = res;
> > +			smp_wmb();
> > +			chip->rirb.cmds[addr]--;
> > +		} else if (printk_ratelimit()) {
> > +			dev_err(chip->dev, "spurious response %#x:%#x, last cmd=%#08x\n",
> > +				res, res_ex,
> > +				chip->last_cmd[addr]);
> > +		}
> > +	}
> > +}
> > +
> > +/* receive a response */
> > +static unsigned int hda_rirb_get_response(struct hdac_bus *bus,
> > +					  unsigned int addr)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +	unsigned long timeout;
> > +	unsigned long loopcounter;
> > +	int do_poll = 0;
> > +
> > + again:
> > +	timeout = jiffies + msecs_to_jiffies(1000);
> > +
> > +	for (loopcounter = 0;; loopcounter++) {
> > +		if (chip->polling_mode || do_poll) {
> > +			spin_lock_irq(&chip->reg_lock);
> > +			hda_update_rirb(chip);
> > +			spin_unlock_irq(&chip->reg_lock);
> > +		}
> > +		if (!chip->rirb.cmds[addr]) {
> > +			smp_rmb();
> > +			bus->rirb_error = 0;
> > +
> > +			if (!do_poll)
> > +				chip->poll_count = 0;
> > +			return chip->rirb.res[addr]; /* the last value */
> > +		}
> > +		if (time_after(jiffies, timeout))
> > +			break;
> > +		else {
> > +			udelay(10);
> > +			cond_resched();
> > +		}
> > +	}
> > +
> > +	if (bus->no_response_fallback)
> > +		return -1;
> > +
> > +	if (!chip->polling_mode && chip->poll_count < 2) {
> > +		dev_dbg(chip->dev,
> > +			"azx_get_response timeout, polling the codec once: last cmd=0x%08x\n",
> > +			chip->last_cmd[addr]);
> > +		do_poll = 1;
> > +		chip->poll_count++;
> > +		goto again;
> > +	}
> > +
> > +
> > +	if (!chip->polling_mode) {
> > +		dev_warn(chip->dev,
> > +			 "azx_get_response timeout, switching to polling mode: last cmd=0x%08x\n",
> > +			 chip->last_cmd[addr]);
> > +		chip->polling_mode = 1;
> > +		goto again;
> > +	}
> > +
> > +	if (chip->msi) {
> > +		dev_warn(chip->dev,
> > +			 "No response from codec, disabling MSI: last cmd=0x%08x\n",
> > +			 chip->last_cmd[addr]);
> > +		if (chip->ops->disable_msi_reset_irq &&
> > +		    chip->ops->disable_msi_reset_irq(chip) < 0) {
> > +			bus->rirb_error = 1;
> > +			return -1;
> > +		}
> > +		goto again;
> > +	}
> > +
> > +	if (chip->probing) {
> > +		/* If this critical timeout happens during the codec probing
> > +		 * phase, this is likely an access to a non-existing codec
> > +		 * slot.  Better to return an error and reset the system.
> > +		 */
> > +		return -1;
> > +	}
> > +
> > +	/* a fatal communication error; need either to reset or to fallback
> > +	 * to the single_cmd mode
> > +	 */
> > +	bus->rirb_error = 1;
> > +	if (bus->allow_bus_reset && !bus->response_reset && !bus->in_reset) {
> > +		bus->response_reset = 1;
> > +		return -1; /* give a chance to retry */
> > +	}
> > +
> > +	dev_err(chip->dev,
> > +		"azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
> > +		chip->last_cmd[addr]);
> > +	chip->single_cmd = 1;
> > +	bus->response_reset = 0;
> > +	/* release CORB/RIRB */
> > +	hda_free_cmd_io(chip);
> > +	/* disable unsolicited responses */
> > +	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_UNSOL);
> > +	return -1;
> > +}
> > +
> > +/*
> > + * Use the single immediate command instead of CORB/RIRB for simplicity
> > + *
> > + * Note: according to Intel, this is not the preferred use.  The command was
> > + *       intended for the BIOS only, and may get confused with unsolicited
> > + *       responses.  So, we shouldn't use it for normal operation from the
> > + *       driver.
> > + *       I left the code, however, for debugging/testing purposes.
> > + */
> > +
> > +/* receive a response */
> > +static int hda_single_wait_for_response(struct hda *chip, unsigned int addr)
> > +{
> > +	int timeout = 50;
> > +
> > +	while (timeout--) {
> > +		/* check IRV busy bit */
> > +		if (azx_readw(chip, IRS) & AZX_IRS_VALID) {
> > +			/* reuse rirb.res as the response return value */
> > +			chip->rirb.res[addr] = azx_readl(chip, IR);
> > +			return 0;
> > +		}
> > +		udelay(1);
> > +	}
> > +	if (printk_ratelimit())
> > +		dev_dbg(chip->dev, "get_response timeout: IRS=0x%x\n",
> > +			azx_readw(chip, IRS));
> > +	chip->rirb.res[addr] = -1;
> > +	return -EIO;
> > +}
> > +
> > +/* send a command */
> > +static int hda_single_send_cmd(struct hdac_bus *bus, u32 val)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +	unsigned int addr = hda_command_addr(val);
> > +	int timeout = 50;
> > +
> > +	bus->rirb_error = 0;
> > +	while (timeout--) {
> > +		/* check ICB busy bit */
> > +		if (!((azx_readw(chip, IRS) & AZX_IRS_BUSY))) {
> > +			/* Clear IRV valid bit */
> > +			azx_writew(chip, IRS, azx_readw(chip, IRS) |
> > +				   AZX_IRS_VALID);
> > +			azx_writel(chip, IC, val);
> > +			azx_writew(chip, IRS, azx_readw(chip, IRS) |
> > +				   AZX_IRS_BUSY);
> > +			return hda_single_wait_for_response(chip, addr);
> > +		}
> > +		udelay(1);
> > +	}
> > +	if (printk_ratelimit())
> > +		dev_dbg(chip->dev,
> > +			"send_cmd timeout: IRS=0x%x, val=0x%x\n",
> > +			azx_readw(chip, IRS), val);
> > +	return -EIO;
> > +}
> > +
> > +/* receive a response */
> > +static unsigned int hda_single_get_response(struct hdac_bus *bus,
> > +					    unsigned int addr)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +
> > +	return chip->rirb.res[addr];
> > +}
> > +
> > +/*
> > + * The below are the main callbacks from hda_codec.
> > + *
> > + * They are just the skeleton to call sub-callbacks according to the
> > + * current setting of chip->single_cmd.
> > + */
> > +
> > +/* send a command */
> > +int hda_send_cmd(struct hdac_bus *bus, unsigned int val)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +
> > +	if (chip->disabled)
> > +		return 0;
> > +	chip->last_cmd[hda_command_addr(val)] = val;
> > +	if (chip->single_cmd)
> > +		return hda_single_send_cmd(bus, val);
> > +	else
> > +		return hda_corb_send_cmd(bus, val);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_send_cmd);
> > +
> > +/* get a response */
> > +unsigned int hda_get_response(struct hdac_bus *bus,
> > +				     unsigned int addr)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +
> > +	if (chip->disabled)
> > +		return 0;
> > +	if (chip->single_cmd)
> > +		return hda_single_get_response(bus, addr);
> > +	else
> > +		return hda_rirb_get_response(bus, addr);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_get_response);
> > +
> > +void hda_bus_reset(struct hdac_bus *bus)
> > +{
> > +	struct hda *chip = bus->private_data;
> > +
> > +	bus->in_reset = 1;
> > +	hda_stop_chip(chip);
> > +	hda_init_chip(chip, true);
> > +	bus->in_reset = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_bus_reset);
> > +
> > +int hda_alloc_stream_pages(struct hda *chip)
> > +{
> > +	int i, err;
> > +
> > +	for (i = 0; i < chip->num_streams; i++) {
> > +		dsp_lock_init(&chip->hda_dev[i]);
> > +		/* allocate memory for the BDL for each stream */
> > +		err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
> > +						 BDL_SIZE,
> > +						 &chip->hda_dev[i].bdl);
> > +		if (err < 0) {
> > +			dev_err(chip->dev, "cannot allocate BDL\n");
> > +			return -ENOMEM;
> > +		}
> > +	}
> > +	/* allocate memory for the position buffer */
> > +	err = chip->ops->dma_alloc_pages(chip, SNDRV_DMA_TYPE_DEV,
> > +					 chip->num_streams * 8, &chip->posbuf);
> > +	if (err < 0) {
> > +		dev_err(chip->dev, "cannot allocate posbuf\n");
> > +		return -ENOMEM;
> > +	}
> > +
> > +	/* allocate CORB/RIRB */
> > +	err = hda_alloc_cmd_io(chip);
> > +	if (err < 0)
> > +		return err;
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_alloc_stream_pages);
> > +
> > +void hda_free_stream_pages(struct hda *chip)
> > +{
> > +	int i;
> > +
> > +	if (chip->hda_dev) {
> > +		for (i = 0; i < chip->num_streams; i++)
> > +			if (chip->hda_dev[i].bdl.area)
> > +				chip->ops->dma_free_pages(
> > +					chip, &chip->hda_dev[i].bdl);
> > +	}
> > +	if (chip->rb.area)
> > +		chip->ops->dma_free_pages(chip, &chip->rb);
> > +	if (chip->posbuf.area)
> > +		chip->ops->dma_free_pages(chip, &chip->posbuf);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_free_stream_pages);
> > +
> > +/*
> > + * Lowlevel interface
> > + */
> > +
> > +/* enter link reset */
> > +void hda_enter_link_reset(struct hda *chip)
> > +{
> > +	unsigned long timeout;
> > +
> > +	/* reset controller */
> > +	azx_writel(chip, GCTL, azx_readl(chip, GCTL) & ~AZX_GCTL_RESET);
> > +
> > +	timeout = jiffies + msecs_to_jiffies(100);
> > +	while ((azx_readb(chip, GCTL) & AZX_GCTL_RESET) &&
> > +			time_before(jiffies, timeout))
> > +		usleep_range(500, 1000);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_enter_link_reset);
> > +
> > +/* exit link reset */
> > +void hda_exit_link_reset(struct hda *chip)
> > +{
> > +	unsigned long timeout;
> > +
> > +	azx_writeb(chip, GCTL, azx_readb(chip, GCTL) | AZX_GCTL_RESET);
> > +
> > +	timeout = jiffies + msecs_to_jiffies(100);
> > +	while (!azx_readb(chip, GCTL) &&
> > +			time_before(jiffies, timeout))
> > +		usleep_range(500, 1000);
> > +}
> > +EXPORT_SYMBOL_GPL(hda_exit_link_reset);
> > +
> > +/* reset codec link */
> > +static int hda_reset(struct hda *chip, bool full_reset)
> > +{
> > +	if (!full_reset)
> > +		goto __skip;
> > +
> > +	/* clear STATESTS */
> > +	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
> > +
> > +	/* reset controller */
> > +	hda_enter_link_reset(chip);
> > +
> > +	/* delay for >= 100us for codec PLL to settle per spec
> > +	 * Rev 0.9 section 5.5.1
> > +	 */
> > +	usleep_range(500, 1000);
> > +
> > +	/* Bring controller out of reset */
> > +	hda_exit_link_reset(chip);
> > +
> > +	/* Brent Chartrand said to wait >= 540us for codecs to initialize */
> > +	usleep_range(1000, 1200);
> > +
> > +__skip:
> > +	/* check to see if controller is ready */
> > +	if (!azx_readb(chip, GCTL)) {
> > +		dev_dbg(chip->dev, "azx_reset: controller not ready!\n");
> > +		return -EBUSY;
> > +	}
> > +
> > +	/* Accept unsolicited responses */
> > +	if (!chip->single_cmd)
> > +		azx_writel(chip, GCTL, azx_readl(chip, GCTL) |
> > +			   AZX_GCTL_UNSOL);
> > +
> > +	/* detect codecs */
> > +	if (!chip->codec_mask) {
> > +		chip->codec_mask = azx_readw(chip, STATESTS);
> > +		dev_dbg(chip->dev, "codec_mask = 0x%x\n",
> > +			chip->codec_mask);
> > +	}
> > +
> > +	return 0;
> > +}
> > +
> > +/* enable interrupts */
> > +static void hda_int_enable(struct hda *chip)
> > +{
> > +	/* enable controller CIE and GIE */
> > +	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) |
> > +		   AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN);
> > +}
> > +
> > +/* disable interrupts */
> > +static void hda_int_disable(struct hda *chip)
> > +{
> > +	int i;
> > +
> > +	/* disable interrupts in stream descriptor */
> > +	for (i = 0; i < chip->num_streams; i++) {
> > +		struct hda_device *azx_dev = &chip->hda_dev[i];
> > +
> > +		azx_sd_writeb(chip, azx_dev, SD_CTL,
> > +			      azx_sd_readb(chip, azx_dev, SD_CTL) &
> > +					~SD_INT_MASK);
> > +	}
> > +
> > +	/* disable SIE for all streams */
> > +	azx_writeb(chip, INTCTL, 0);
> > +
> > +	/* disable controller CIE and GIE */
> > +	azx_writel(chip, INTCTL, azx_readl(chip, INTCTL) &
> > +		   ~(AZX_INT_CTRL_EN | AZX_INT_GLOBAL_EN));
> > +}
> > +
> > +/* clear interrupts */
> > +static void hda_int_clear(struct hda *chip)
> > +{
> > +	int i;
> > +
> > +	/* clear stream status */
> > +	for (i = 0; i < chip->num_streams; i++) {
> > +		struct hda_device *azx_dev = &chip->hda_dev[i];
> > +
> > +		azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
> > +	}
> > +
> > +	/* clear STATESTS */
> > +	azx_writew(chip, STATESTS, STATESTS_INT_MASK);
> > +
> > +	/* clear rirb status */
> > +	azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
> > +
> > +	/* clear int status */
> > +	azx_writel(chip, INTSTS, AZX_INT_CTRL_EN | AZX_INT_ALL_STREAM);
> > +}
> > +
> > +/*
> > + * reset and start the controller registers
> > + */
> > +void hda_init_chip(struct hda *chip, bool full_reset)
> > +{
> > +	if (chip->initialized)
> > +		return;
> > +
> > +	/* reset controller */
> > +	hda_reset(chip, full_reset);
> > +
> > +	/* initialize interrupts */
> > +	hda_int_clear(chip);
> > +	hda_int_enable(chip);
> > +
> > +	/* initialize the codec command I/O */
> > +	if (!chip->single_cmd)
> > +		hda_init_cmd_io(chip);
> > +
> > +	/* program the position buffer */
> > +	azx_writel(chip, DPLBASE, (u32)chip->posbuf.addr);
> > +	azx_writel(chip, DPUBASE, upper_32_bits(chip->posbuf.addr));
> > +
> > +	chip->initialized = 1;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_init_chip);
> > +
> > +void hda_stop_chip(struct hda *chip)
> > +{
> > +	if (!chip->initialized)
> > +		return;
> > +
> > +	/* disable interrupts */
> > +	hda_int_disable(chip);
> > +	hda_int_clear(chip);
> > +
> > +	/* disable CORB/RIRB */
> > +	hda_free_cmd_io(chip);
> > +
> > +	/* disable position buffer */
> > +	azx_writel(chip, DPLBASE, 0);
> > +	azx_writel(chip, DPUBASE, 0);
> > +
> > +	chip->initialized = 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_stop_chip);
> > +
> > +/*
> > + * interrupt handler
> > + */
> > +irqreturn_t hda_interrupt(int irq, void *dev_id)
> > +{
> > +	struct hda *chip = dev_id;
> > +	u32 status;
> > +
> > +#ifdef CONFIG_PM_RUNTIME
> > +	if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
> > +		if (!pm_runtime_active(chip->dev))
> > +			return IRQ_NONE;
> > +#endif
> > +
> > +	spin_lock(&chip->reg_lock);
> > +
> > +	if (chip->disabled) {
> > +		spin_unlock(&chip->reg_lock);
> > +		return IRQ_NONE;
> > +	}
> > +
> > +	status = azx_readl(chip, INTSTS);
> > +	if (status == 0 || status == 0xffffffff) {
> > +		spin_unlock(&chip->reg_lock);
> > +		return IRQ_NONE;
> > +	}
> > +	spin_unlock(&chip->reg_lock);
> > +
> > +	return IRQ_WAKE_THREAD;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_interrupt);
> > +
> > +
> > +irqreturn_t hda_threaded_handler(int irq, void *dev_id)
> > +{
> > +	struct hda *chip = dev_id;
> > +	struct hda_device *azx_dev;
> > +	u32 status;
> > +	u8 sd_status;
> > +	int i;
> > +	unsigned long cookie;
> > +
> > +	status = azx_readl(chip, INTSTS);
> > +	spin_lock_irqsave(&chip->reg_lock, cookie);
> > +	for (i = 0; i < chip->num_streams; i++) {
> > +		azx_dev = &chip->hda_dev[i];
> > +		if (status & azx_dev->sd_int_sta_mask) {
> > +			sd_status = azx_sd_readb(chip, azx_dev, SD_STS);
> > +			azx_sd_writeb(chip, azx_dev, SD_STS, SD_INT_MASK);
> > +			if (!azx_dev->substream || !azx_dev->running ||
> > +			    !(sd_status & SD_INT_COMPLETE))
> > +				continue;
> > +			/* check whether this IRQ is really acceptable */
> > +			if (!chip->ops->position_check ||
> > +			    chip->ops->position_check(chip, azx_dev)) {
> > +				spin_unlock_irqrestore(&chip->reg_lock, cookie);
> > +				snd_pcm_period_elapsed(azx_dev->substream);
> > +				spin_lock_irqsave(&chip->reg_lock, cookie);
> > +			}
> > +		}
> > +	}
> > +
> > +	/* clear rirb int */
> > +	status = azx_readb(chip, RIRBSTS);
> > +	if (status & RIRB_INT_MASK) {
> > +		if (status & RIRB_INT_RESPONSE) {
> > +			if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
> > +				udelay(80);
> > +			hda_update_rirb(chip);
> > +		}
> > +		azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
> > +	}
> > +
> > +	spin_unlock_irqrestore(&chip->reg_lock, cookie);
> > +
> > +	return IRQ_HANDLED;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_threaded_handler);
> > +
> > +static bool is_input_stream(struct hda *chip, unsigned char index)
> > +{
> > +	return (index >= chip->capture_index_offset &&
> > +		 index < chip->capture_index_offset + chip->capture_streams);
> > +}
> > +
> > +/* initialize SD streams */
> > +int hda_init_stream(struct hda *chip)
> > +{
> > +	int i;
> > +	int in_stream_tag = 0;
> > +	int out_stream_tag = 0;
> > +
> > +	/* initialize each stream (aka device)
> > +	 * assign the starting bdl address to each stream (device)
> > +	 * and initialize
> > +	 */
> > +	for (i = 0; i < chip->num_streams; i++) {
> > +		struct hda_device *azx_dev = &chip->hda_dev[i];
> > +
> > +		azx_dev->posbuf = (u32 __iomem *)(chip->posbuf.area + i * 8);
> > +		/* offset: SDI0=0x80, SDI1=0xa0, ... SDO3=0x160 */
> > +		azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
> > +		/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
> > +		azx_dev->sd_int_sta_mask = 1 << i;
> > +		azx_dev->index = i;
> > +
> > +		/* stream tag must be unique throughout
> > +		 * the stream direction group,
> > +		 * valid values 1...15
> > +		 * use separate stream tag if the flag
> > +		 * AZX_DCAPS_SEPARATE_STREAM_TAG is used
> > +		 */
> > +		if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
> > +			azx_dev->stream_tag =
> > +				is_input_stream(chip, i) ?
> > +				++in_stream_tag :
> > +				++out_stream_tag;
> > +		else
> > +			azx_dev->stream_tag = i + 1;
> > +	}
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_init_stream);
> > +
> > +void hda_timecounter_init(struct snd_pcm_substream *substream,
> > +				bool force, cycle_t last, void *read_fn)
> > +{
> > +	struct hda_device *azx_dev = get_hda_dev(substream);
> > +	struct timecounter *tc = &azx_dev->tc;
> > +	struct cyclecounter *cc = &azx_dev->cc;
> > +	u64 nsec;
> > +
> > +	cc->read = read_fn;
> > +	cc->mask = CLOCKSOURCE_MASK(32);
> > +
> > +	/*
> > +	 * Converting from 24 MHz to ns means applying a 125/3 factor.
> > +	 * To avoid any saturation issues in intermediate operations,
> > +	 * the 125 factor is applied first. The division is applied
> > +	 * last after reading the timecounter value.
> > +	 * Applying the 1/3 factor as part of the multiplication
> > +	 * requires at least 20 bits for decent precision; however,
> > +	 * overflows occur after about 4 hours or less, which is not an option.
> > +	 */
> > +
> > +	cc->mult = 125; /* saturation after 195 years */
> > +	cc->shift = 0;
> > +
> > +	nsec = 0; /* audio time is elapsed time since trigger */
> > +	timecounter_init(tc, cc, nsec);
> > +	if (force)
> > +		/*
> > +		 * force timecounter to use predefined value,
> > +		 * used for synchronized starts
> > +		 */
> > +		tc->cycle_last = last;
> > +}
> > +EXPORT_SYMBOL_GPL(hda_timecounter_init);
> > +
> > +MODULE_LICENSE("GPL v2");
> > +MODULE_DESCRIPTION("Common HDA driver functions");
> > -- 
> > 1.7.9.5
> > 
> > 
