{
struct dw_spi *dws;
char *buf;
- u32 len = 0;
ssize_t ret;
int reg = 0,value = 0;
}
#endif /* CONFIG_DEBUG_FS */
+
static void wait_till_not_busy(struct dw_spi *dws)
{
unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
- u32 tx_left, tx_room, rxtx_gap;
+ u32 tx_left, tx_room;
tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
tx_room = dws->fifo_len - dw_readw(dws, SPIM_TXFLR);
static int reader_all(struct dw_spi *dws)
{
- u16 rxw;
while (!(dw_readw(dws, SPIM_SR) & SR_RF_EMPT)
&& (dws->rx < dws->rx_end)) {
dw_reader(dws);
*/
static int map_dma_buffers(struct dw_spi *dws)
{
- if (!dws->cur_msg->is_dma_mapped
- || !dws->dma_inited
+ if (!dws->dma_inited
|| !dws->cur_chip->enable_dma
|| !dws->dma_ops)
return 0;
dws->prev_chip = dws->cur_chip;
dws->cur_chip = NULL;
dws->dma_mapped = 0;
+ dws->state = 0;
//queue_work(dws->workqueue, &dws->pump_messages);
/*it is important to disable interrupts*/
if (next_msg && next_msg->spi != msg->spi)
next_msg = NULL;
- spi_finalize_current_message(dws->master);
dws->cur_chip = NULL;
-
+ spi_finalize_current_message(dws->master);
- DBG_SPI("%s:line=%d,tx_left=%d\n",__func__,__LINE__, (dws->tx_end - dws->tx) / dws->n_bytes);
+ DBG_SPI("%s:line=%d,tx_left=%ld\n",__func__,__LINE__, (long)(dws->tx_end - dws->tx) / dws->n_bytes);
}
dev_err(&dws->master->dev, "%s\n", msg);
dws->cur_msg->state = ERROR_STATE;
- tasklet_schedule(&dws->pump_transfers);
-
+ tasklet_schedule(&dws->pump_transfers);
DBG_SPI("%s:line=%d\n",__func__,__LINE__);
}
/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
- DBG_SPI("%s:len=%d\n",__func__, dws->len);
+ DBG_SPI("%s:len=%ld\n",__func__, (long)dws->len);
do {
dw_writer(dws);
u16 rxint_level = 0;
u16 clk_div = 0;
u32 speed = 0;
- u32 cr0 = 0;
-
+ u32 cr0 = 0;
+ u16 dma_ctrl = 0;
+ int i = 0;
/* Get current state information */
message = dws->cur_msg;
spi = message->spi;
if (unlikely(!chip->clk_div))
+ {
chip->clk_div = dws->max_freq / chip->speed_hz;
+ chip->clk_div = (chip->clk_div + 1) & 0xfffe;
+ chip->speed_hz = dws->max_freq / chip->clk_div;
+ }
+
if (message->state == ERROR_STATE) {
message->status = -EIO;
cr0 = chip->cr0;
- DBG_SPI("%s:len=%d\n",__func__,dws->len);
/* Handle per transfer options for bpw and speed */
if (transfer->speed_hz) {
clk_div = dws->max_freq / speed;
clk_div = (clk_div + 1) & 0xfffe;
- chip->speed_hz = speed;
+ chip->speed_hz = dws->max_freq / clk_div;
chip->clk_div = clk_div;
}
}
+ DBG_SPI("%s:len=%ld,clk_div=%d,speed_hz=%d\n",__func__, (long)dws->len,chip->clk_div,chip->speed_hz);
if (transfer->bits_per_word) {
bits = transfer->bits_per_word;
else
chip->tmode = SPI_TMOD_TO;
- //cr0 &= ~SPI_TMOD_MASK;
- //cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
cr0 &= ~(0x3 << SPI_MODE_OFFSET);
cr0 &= ~(0x3 << SPI_TMOD_OFFSET);
* 2. clk_div is changed
* 3. control value changes
*/
- if (dw_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div || imask) {
+ //if (dw_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div || imask)
+ if(dws->tx || dws->rx)
+ {
spi_enable_chip(dws, 0);
if (dw_readl(dws, SPIM_CTRLR0) != cr0)
dw_writel(dws, SPIM_CTRLR0, cr0);
+
spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
spi_chip_sel(dws, spi->chip_select);
- dw_writew(dws, SPIM_CTRLR1, dws->len-1);
-
- if (txint_level)
+ dw_writew(dws, SPIM_CTRLR1, dws->len-1);
+
+ if (txint_level != dw_readl(dws, SPIM_TXFTLR))
dw_writew(dws, SPIM_TXFTLR, txint_level);
- if (rxint_level)
+ if (rxint_level != dw_readl(dws, SPIM_RXFTLR))
{
dw_writew(dws, SPIM_RXFTLR, rxint_level);
DBG_SPI("%s:rxint_level=%d\n",__func__,rxint_level);
}
+
+ /* setup DMA related registers */
+ if(dws->dma_mapped)
+ {
+ dws->dmatdlr = dws->n_bytes;
+ dws->dmardlr = dws->n_bytes - 1;
+ for(i=dws->n_bytes; i<=dws->fifo_len / 4; i++)
+ {
+ if((dws->len / dws->n_bytes) % i == 0)
+ dws->dmatdlr = i;
+ }
+
+ /* Set the interrupt mask; for poll mode just disable all interrupts */
+ spi_mask_intr(dws, 0xff);
+ if(dws->tx)
+ {
+ dma_ctrl |= SPI_DMACR_TX_ENABLE;
+ dw_writew(dws, SPIM_DMATDLR, dws->dmatdlr);
+ dw_writew(dws, SPIM_CTRLR1, dws->len-1);
+ }
+
+ dws->dmardlr = (dws->dmatdlr != dws->n_bytes)?(dws->dmatdlr-1):(dws->n_bytes-1);
+
+ if (dws->rx)
+ {
+ dma_ctrl |= SPI_DMACR_RX_ENABLE;
+ dw_writew(dws, SPIM_DMARDLR, dws->dmardlr);
+ dw_writew(dws, SPIM_CTRLR1, dws->len-1);
+ }
+ dw_writew(dws, SPIM_DMACR, dma_ctrl);
+
+ DBG_SPI("%s:dma_ctrl=0x%x,dmatdlr=%d,dmardlr=%d\n",__func__,dw_readw(dws, SPIM_DMACR),dws->dmatdlr, dws->dmardlr);
+
+ }
+
+ if((!dws->dma_mapped) || (dws->dma_mapped && dws->tx))
+ spi_enable_chip(dws, 1);
+
+ DBG_SPI("%s:ctrl0=0x%x\n",__func__,dw_readw(dws, SPIM_CTRLR0));
+
/* Set the interrupt mask; for poll mode just disable all interrupts */
spi_mask_intr(dws, 0xff);
if (imask)
spi_umask_intr(dws, imask);
-
- spi_enable_chip(dws, 1);
if (cs_change)
dws->prev_chip = chip;
+
+ }
+ else
+ {
+ printk("%s:warning tx and rx is null\n",__func__);
}
+ /* DMA must be set up before spi_enable_chip() */
if (dws->dma_mapped)
- dws->dma_ops->dma_transfer(dws, cs_change);
+ dws->dma_ops->dma_transfer(dws, cs_change);
if (chip->poll_mode)
poll_transfer(dws);
return;
}
-static void pump_messages(struct work_struct *work)
-{
- struct dw_spi *dws =
- container_of(work, struct dw_spi, pump_messages);
- unsigned long flags;
-
- /* Lock queue and check for queue work */
- spin_lock_irqsave(&dws->lock, flags);
- if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
- dws->busy = 0;
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
-
- /* Make sure we are not already running a message */
- if (dws->cur_msg) {
- spin_unlock_irqrestore(&dws->lock, flags);
- return;
- }
-
- /* Extract head of queue */
- dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
- list_del_init(&dws->cur_msg->queue);
-
- /* Initial message state*/
- dws->cur_msg->state = START_STATE;
- dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
- struct spi_transfer,
- transfer_list);
- dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
-
- /* Mark as busy and launch transfers */
- tasklet_schedule(&dws->pump_transfers);
-
- dws->busy = 1;
- spin_unlock_irqrestore(&dws->lock, flags);
-}
-
-
static int dw_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct dw_spi *dws = spi_master_get_devdata(master);
-
+ int ret = 0;
+
dws->cur_msg = msg;
/* Initial message state*/
dws->cur_msg->state = START_STATE;
/* prepare to setup the SSP, in pump_transfers, using the per
* chip configuration */
dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
-
+
+ dws->dma_mapped = map_dma_buffers(dws);
+ INIT_COMPLETION(dws->xfer_completion);
+
/* Mark as busy and launch transfers */
tasklet_schedule(&dws->pump_transfers);
DBG_SPI("%s:line=%d\n",__func__,__LINE__);
+ if (dws->dma_mapped)
+ {
+ ret = wait_for_completion_timeout(&dws->xfer_completion,
+ msecs_to_jiffies(2000));
+ if(ret == 0)
+ {
+ dev_err(&dws->master->dev, "dma transfer timeout\n");
+ giveback(dws);
+ return 0;
+ }
+
+ DBG_SPI("%s:wait %d\n",__func__, ret);
+ }
+
return 0;
}
dw_writew(dws, SPIM_TXFTLR, 0);
}
- spi_enable_chip(dws, 1);
+ //spi_enable_chip(dws, 1);
flush(dws);
DBG_SPI("%s:fifo_len=%d\n",__func__, dws->fifo_len);
}
dws->type = SSI_MOTO_SPI;
dws->prev_chip = NULL;
dws->dma_inited = 0;
- dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ dws->tx_dma_addr = (dma_addr_t)(dws->paddr + SPIM_TXDR);
+ dws->rx_dma_addr = (dma_addr_t)(dws->paddr + SPIM_RXDR);
snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
dws->bus_num);
dev_err(&master->dev, "can not get IRQ\n");
goto err_free_master;
}
-
- master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ master->dev.parent = dws->parent_dev;
+ master->dev.of_node = dws->parent_dev->of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
master->cleanup = dw_spi_cleanup;
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
- dev_warn(&master->dev, "DMA init failed\n");
+ dev_warn(&master->dev, "DMA init failed,ret=%d\n",ret);
dws->dma_inited = 0;
}
}
err_queue_alloc:
if (dws->dma_ops && dws->dma_ops->dma_exit)
dws->dma_ops->dma_exit(dws);
-err_diable_hw:
+/* err_diable_hw: */
spi_enable_chip(dws, 0);
free_irq(dws->irq, dws);
err_free_master:
void dw_spi_remove_host(struct dw_spi *dws)
{
- int status = 0;
-
if (!dws)
return;