\r
//#define QUICK_TRANSFER \r
\r
-#if 0\r
-#define DBG(x...) printk(x)\r
+#if 1\r
+#define DBG printk\r
#else\r
#define DBG(x...)\r
#endif\r
"DW SPI: Status keeps busy for 1000us after a read/write!\n");\r
}\r
\r
+#if defined(QUICK_TRANSFER)\r
static void wait_till_tf_empty(struct rk29xx_spi *dws)\r
{\r
unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);\r
dev_err(&dws->master->dev,\r
"DW SPI: Status keeps busy for 1000us after a read/write!\n");\r
}\r
+#endif\r
\r
static void flush(struct rk29xx_spi *dws)\r
{\r
return 0;\r
rk29xx_writew(dws, SPIM_TXDR, 0);\r
dws->tx += n_bytes;\r
- wait_till_not_busy(dws);\r
+ //wait_till_not_busy(dws);\r
\r
return 1;\r
}\r
if ((rk29xx_readw(dws, SPIM_SR) & SR_TF_FULL)\r
|| (dws->tx == dws->tx_end))\r
return 0;\r
- DBG(KERN_INFO "ctrl0: 0x%x\n", rk29xx_readl(dws, SPIM_CTRLR0));\r
rk29xx_writew(dws, SPIM_TXDR, *(u8 *)(dws->tx));\r
- DBG(KERN_INFO "dws->tx:%x\n", *(u8 *)(dws->tx));\r
++dws->tx;\r
- wait_till_not_busy(dws);\r
+ //wait_till_not_busy(dws);\r
\r
return 1;\r
}\r
{\r
while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)\r
&& (dws->rx < dws->rx_end)) {\r
- printk(KERN_INFO "ctrl0: 0x%x\n", rk29xx_readl(dws, SPIM_CTRLR0));\r
*(u8 *)(dws->rx) = rk29xx_readw(dws, SPIM_RXDR) & 0xFFU;\r
- printk(KERN_INFO "dws->rx:%x\n", *(u8 *)(dws->rx));\r
++dws->rx;\r
}\r
\r
\r
rk29xx_writew(dws, SPIM_TXDR, *(u16 *)(dws->tx));\r
dws->tx += 2;\r
- wait_till_not_busy(dws);\r
+ //wait_till_not_busy(dws);\r
\r
return 1;\r
}\r
while (!(rk29xx_readw(dws, SPIM_SR) & SR_RF_EMPT)\r
&& (dws->rx < dws->rx_end)) {\r
temp = rk29xx_readw(dws, SPIM_RXDR);\r
- printk(KERN_INFO "dws->rx:%02x\n", temp);\r
*(u16 *)(dws->rx) = temp;\r
dws->rx += 2;\r
}\r
return DONE_STATE;\r
}\r
\r
-static void rk29_spi_dma_rxcb(struct rk29_dma_chan *chan, void *buf_id,\r
+static void rk29_spi_dma_rxcb(void *buf_id,\r
int size, enum rk29_dma_buffresult res)\r
{\r
struct rk29xx_spi *dws = buf_id;\r
if (res == RK29_RES_OK)\r
dws->state &= ~RXBUSY;\r
else\r
- dev_err(&dws->master->dev, "DmaAbrtRx-%d\n", size);\r
+ dev_err(&dws->master->dev, "DmaAbrtRx-%d, size: %d\n", res, size);\r
\r
/* If the other done */\r
if (!(dws->state & TXBUSY))\r
spin_unlock_irqrestore(&dws->lock, flags);\r
}\r
\r
-static void rk29_spi_dma_txcb(struct rk29_dma_chan *chan, void *buf_id,\r
+static void rk29_spi_dma_txcb(void *buf_id,\r
int size, enum rk29_dma_buffresult res)\r
{\r
struct rk29xx_spi *dws = buf_id;\r
dev_err(&dws->master->dev, "DmaAbrtTx-%d \n", size);\r
\r
/* If the other done */\r
- if (!(dws->state & RXBUSY))\r
+ if (!(dws->state & RXBUSY)) \r
complete(&dws->xfer_completion);\r
\r
spin_unlock_irqrestore(&dws->lock, flags);\r
\r
static int acquire_dma(struct rk29xx_spi *dws)
{	
-	#if 1
-	dws->dma_inited = 0;
-	return 1;
-	#endif
-	
+	/*
+	 * Acquire the controller's RX and TX DMA channels (once per device).
+	 * Return convention changes with this patch: 0 = success, -1 = failure
+	 * (previously 1 = success, 0 = failure) -- callers must use the new
+	 * convention, e.g. "if (acquire_dma(dws)) goto err".
+	 */
	if (dws->dma_inited) {
-		return 1;
+		return 0;
	}

	if(rk29_dma_request(dws->rx_dmach, 
			    &rk29_spi_dma_client, NULL) < 0) {
		dev_err(&dws->master->dev, "dws->rx_dmach : %d, cannot get RxDMA\n", dws->rx_dmach);
-		return 0;
+		return -1;
	}
-	DBG(KERN_INFO "request dma success\n");
-
-	rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb);
-	rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,
-				dws->sfr_start + SPIM_RXDR);

	if (rk29_dma_request(dws->tx_dmach,
					&rk29_spi_dma_client, NULL) < 0) {
		dev_err(&dws->master->dev, "dws->tx_dmach : %d, cannot get TxDMA\n", dws->tx_dmach);
+		/* Undo the successful RX request so a later retry starts clean. */
		rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);
-		return 0;
+		return -1;
	}
-	rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb);
-	rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,
-				dws->sfr_start + SPIM_TXDR);
-
+	
+	/* Buffer-done callbacks and devconfig are now programmed per-transfer
+	 * in map_dma_buffers() instead of once here. */
	dws->dma_inited = 1;
-	return 1;
+	return 0;
+}
+\r
+/*
+ * release_dma - free both DMA channels acquired by acquire_dma().
+ *
+ * Bug fix: the condition was "!dws && dws->dma_inited", which can never be
+ * true for a valid pointer (and would dereference NULL when dws == NULL),
+ * so the channels were never actually released. The intended guard is
+ * "dws && dws->dma_inited". Also clear dma_inited so a subsequent
+ * acquire_dma() call starts from a clean state.
+ */
+static void release_dma(struct rk29xx_spi *dws)
+{
+	if(dws && dws->dma_inited) {
+		rk29_dma_free(dws->rx_dmach, &rk29_spi_dma_client);
+		rk29_dma_free(dws->tx_dmach, &rk29_spi_dma_client);
+		dws->dma_inited = 0;
+	}
}
\r
/*\r
{\r
if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited\r
|| !dws->cur_chip->enable_dma)\r
- return 0;\r
+ return -1;\r
\r
- if (dws->cur_transfer->tx_dma)\r
+ if (dws->cur_transfer->tx_dma) {\r
dws->tx_dma = dws->cur_transfer->tx_dma;\r
+ if (rk29_dma_set_buffdone_fn(dws->tx_dmach, rk29_spi_dma_txcb)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
+ return -1;\r
+ }\r
+ if (rk29_dma_devconfig(dws->tx_dmach, RK29_DMASRC_MEM,\r
+ (unsigned long)dws->sfr_start + SPIM_TXDR)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
+ return -1;\r
+ }\r
+ }\r
\r
- if (dws->cur_transfer->rx_dma)\r
+ if (dws->cur_transfer->rx_dma) {\r
dws->rx_dma = dws->cur_transfer->rx_dma;\r
+ if (rk29_dma_set_buffdone_fn(dws->rx_dmach, rk29_spi_dma_rxcb)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_set_buffdone_fn fail\n");\r
+ return -1;\r
+ }\r
+ if (rk29_dma_devconfig(dws->rx_dmach, RK29_DMASRC_HW,\r
+ (unsigned long)dws->sfr_start + SPIM_RXDR)) {\r
+ dev_err(&dws->master->dev, "rk29_dma_devconfig fail\n");\r
+ return -1;\r
+ }\r
+ }\r
\r
- return 1;\r
+ return 0;\r
}\r
\r
/* Caller already set message->status; dma and pio irqs are blocked */\r
transfer_list);\r
\r
if (!last_transfer->cs_change)\r
- dws->cs_control(dws,msg->spi->chip_select,MRST_SPI_DEASSERT);\r
+ dws->cs_control(dws,msg->spi->chip_select, MRST_SPI_DEASSERT);\r
\r
msg->state = NULL;\r
if (msg->complete)\r
/* Must be called inside pump_transfers() */\r
static void poll_transfer(struct rk29xx_spi *dws)\r
{\r
- DBG(KERN_INFO "poll_transfer\n");\r
while (dws->write(dws)) {\r
wait_till_not_busy(dws);\r
dws->read(dws);\r
}\r
transfer_complete(dws);\r
}\r
-\r
-static void dma_transfer(struct rk29xx_spi *dws, struct spi_transfer *xfer) //int cs_change)\r
-{\r
- unsigned long val;\r
- int ms;\r
-\r
- DBG(KERN_INFO "dma_transfer\n");\r
- \r
- if (xfer->tx_buf != NULL) {\r
- dws->state |= TXBUSY;\r
- rk29_dma_config(dws->tx_dmach, 4);\r
- rk29_dma_enqueue(dws->tx_dmach, (void *)dws,\r
- xfer->tx_dma, xfer->len);\r
- rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_START);\r
- }\r
-\r
- if (xfer->rx_buf != NULL) {\r
- dws->state |= RXBUSY;\r
- rk29_dma_config(dws->rx_dmach, 4);\r
- rk29_dma_enqueue(dws->rx_dmach, (void *)dws,\r
- xfer->rx_dma, xfer->len);\r
- rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_START);\r
- }\r
-\r
- /* millisecs to xfer 'len' bytes @ 'cur_speed' */\r
- ms = xfer->len * 8 * 1000 / dws->cur_speed;\r
- ms += 10; /* some tolerance */\r
-\r
- val = msecs_to_jiffies(ms) + 10;\r
- val = wait_for_completion_timeout(&dws->xfer_completion, val);\r
-}\r
-\r
static void spi_chip_sel(struct rk29xx_spi *dws, u16 cs)\r
{\r
if(cs >= dws->master->num_chipselect)\r
chip = dws->cur_chip;\r
spi = message->spi; \r
if (unlikely(!chip->clk_div))\r
- chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz; \r
+ //chip->clk_div = clk_get_rate(dws->clock_spim) / chip->speed_hz; \r
+ chip->clk_div = 40000000 / chip->speed_hz; \r
if (message->state == ERROR_STATE) {\r
message->status = -EIO;\r
goto early_exit;\r
chip->clk_div = clk_div;\r
}\r
}\r
+ \r
if (transfer->bits_per_word) {\r
bits = transfer->bits_per_word;\r
\r
}\r
\r
cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
+ | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)\r
+ | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)\r
| (chip->type << SPI_FRF_OFFSET)\r
| (spi->mode << SPI_MODE_OFFSET)\r
| (chip->tmode << SPI_TMOD_OFFSET);\r
\r
cr0 &= ~(0x3 << SPI_MODE_OFFSET);\r
cr0 |= (chip->tmode << SPI_TMOD_OFFSET);\r
- }\r
- /* Check if current transfer is a DMA transaction */\r
- dws->dma_mapped = map_dma_buffers(dws);\r
+ } \r
\r
/*\r
* Interrupt mode\r
if (txint_level)\r
rk29xx_writew(dws, SPIM_TXFTLR, txint_level);\r
\r
+ rk29xx_writew(dws, SPIM_CTRLR1, dws->len-1);\r
spi_enable_chip(dws, 1);\r
if (cs_change)\r
dws->prev_chip = chip;\r
- }\r
-\r
- if (dws->dma_mapped && transfer->len > DMA_MIN_BYTES)\r
- dma_transfer(dws, transfer); ///cs_change);\r
+ } \r
\r
if (chip->poll_mode)\r
poll_transfer(dws);\r
return;\r
}\r
\r
+/*
+ * dma_transfer - execute the current transfer of dws->cur_msg through the
+ * RK29 DMA engine, then advance to the next transfer (recursing once per
+ * remaining transfer) and finally give the message back via giveback().
+ *
+ * Any setup failure or message-level error jumps to err_out, which returns
+ * the message to the caller with whatever status has been recorded.
+ *
+ * Fix vs. previous revision: the map_dma_buffers() failure path printed the
+ * copy/pasted text "acquire dma failed"; it now reports the mapping step.
+ */
+static void dma_transfer(struct rk29xx_spi *dws) //int cs_change)
+{
+	struct spi_message *message = NULL;
+	struct spi_transfer *transfer = NULL;
+	struct spi_transfer *previous = NULL;
+	struct spi_device *spi = NULL;
+	struct chip_data *chip = NULL;
+	unsigned long val;
+	int ms;
+	int iRet;
+	u8 bits = 0;
+	u8 spi_dfs = 0;
+	u8 cs_change = 0;
+	u16 clk_div = 0;
+	u32 speed = 0;
+	u32 cr0 = 0;
+	u32 dmacr = 0;
+
+	DBG(KERN_INFO "dma_transfer\n");
+
+	if (acquire_dma(dws)) {
+		dev_err(&dws->master->dev, "acquire dma failed\n");
+		goto err_out;
+	}
+
+	if (map_dma_buffers(dws)) {
+		dev_err(&dws->master->dev, "map dma buffers failed\n");
+		goto err_out;
+	}
+
+	/* Get current state information */
+	message = dws->cur_msg;
+	transfer = dws->cur_transfer;
+	chip = dws->cur_chip;
+	spi = message->spi;
+	/* NOTE(review): assumes a 40 MHz spim clock -- confirm against
+	 * clk_get_rate(dws->clock_spim), which the PIO path used before. */
+	if (unlikely(!chip->clk_div))
+		chip->clk_div = 40000000 / chip->speed_hz;
+	if (message->state == ERROR_STATE) {
+		message->status = -EIO;
+		goto err_out;
+	}
+
+	/* Handle end of message */
+	if (message->state == DONE_STATE) {
+		message->status = 0;
+		goto err_out;
+	}
+
+	/* Delay if requested at end of transfer*/
+	if (message->state == RUNNING_STATE) {
+		previous = list_entry(transfer->transfer_list.prev,
+					struct spi_transfer,
+					transfer_list);
+		if (previous->delay_usecs)
+			udelay(previous->delay_usecs);
+	}
+
+	dws->n_bytes = chip->n_bytes;
+	dws->dma_width = chip->dma_width;
+	dws->cs_control = chip->cs_control;
+
+	dws->rx_dma = transfer->rx_dma;
+	dws->tx_dma = transfer->tx_dma;
+	dws->tx = (void *)transfer->tx_buf;
+	dws->tx_end = dws->tx + transfer->len;
+	dws->rx = transfer->rx_buf;
+	dws->rx_end = dws->rx + transfer->len;
+	dws->write = dws->tx ? chip->write : null_writer;
+	dws->read = dws->rx ? chip->read : null_reader;
+	dws->cs_change = transfer->cs_change;
+	dws->len = dws->cur_transfer->len;
+	if (chip != dws->prev_chip)
+		cs_change = 1;
+
+	cr0 = chip->cr0;
+
+	/* Handle per transfer options for bpw and speed */
+	if (transfer->speed_hz) {
+		speed = chip->speed_hz;
+
+		if (transfer->speed_hz != speed) {
+			speed = transfer->speed_hz;
+			if (speed > clk_get_rate(dws->clock_spim)) {
+				dev_err(&dws->master->dev, "MRST SPI0: unsupported"
+					"freq: %dHz\n", speed);
+				message->status = -EIO;
+				goto err_out;
+			}
+
+			/* clk_div doesn't support odd number */
+			clk_div = clk_get_rate(dws->clock_spim) / speed;
+			clk_div = (clk_div + 1) & 0xfffe;
+
+			chip->speed_hz = speed;
+			chip->clk_div = clk_div;
+		}
+	}
+
+	if (transfer->bits_per_word) {
+		bits = transfer->bits_per_word;
+
+		switch (bits) {
+		case 8:
+			dws->n_bytes = 1;
+			dws->dma_width = 1;
+			spi_dfs = SPI_DFS_8BIT;
+			break;
+		case 16:
+			dws->n_bytes = 2;
+			dws->dma_width = 2;
+			spi_dfs = SPI_DFS_16BIT;
+			break;
+		default:
+			dev_err(&dws->master->dev, "MRST SPI0: unsupported bits:"
+				"%db\n", bits);
+			message->status = -EIO;
+			goto err_out;
+		}
+
+		cr0 = (spi_dfs << SPI_DFS_OFFSET)
+			| (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)
+			| (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)
+			| (chip->type << SPI_FRF_OFFSET)
+			| (spi->mode << SPI_MODE_OFFSET)
+			| (chip->tmode << SPI_TMOD_OFFSET);
+	}
+	message->state = RUNNING_STATE;
+
+	/*
+	 * Adjust transfer mode if necessary. Requires platform dependent
+	 * chipselect mechanism.
+	 */
+	if (dws->cs_control) {
+		if (dws->rx && dws->tx)
+			chip->tmode = SPI_TMOD_TR;
+		else if (dws->rx)
+			chip->tmode = SPI_TMOD_RO;
+		else
+			chip->tmode = SPI_TMOD_TO;
+
+		cr0 &= ~(0x3 << SPI_MODE_OFFSET);
+		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
+	}
+
+	/*
+	 * Reprogram registers only if
+	 * 1. chip select changes
+	 * 2. clk_div is changed
+	 * 3. control value changes
+	 */
+	if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0 || cs_change || clk_div) {
+		spi_enable_chip(dws, 0);
+		if (rk29xx_readw(dws, SPIM_CTRLR0) != cr0) {
+			rk29xx_writew(dws, SPIM_CTRLR0, cr0);
+		}
+
+		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
+		spi_chip_sel(dws, spi->chip_select);
+		/* Set the interrupt mask, for poll mode just diable all int */
+		spi_mask_intr(dws, 0xff);
+
+		/* Enable DMA requests only for the directions this transfer
+		 * actually uses; CTRLR1 holds (len - 1) for RX-counted modes. */
+		if (transfer->tx_buf != NULL) {
+			dmacr |= SPI_DMACR_TX_ENABLE;
+			rk29xx_writew(dws, SPIM_DMATDLR, 0);
+		}
+		if (transfer->rx_buf != NULL) {
+			dmacr |= SPI_DMACR_RX_ENABLE;
+			rk29xx_writew(dws, SPIM_DMARDLR, 0);
+			rk29xx_writew(dws, SPIM_CTRLR1, transfer->len-1);
+		}
+		rk29xx_writew(dws, SPIM_DMACR, dmacr);
+		spi_enable_chip(dws, 1);
+		if (cs_change)
+			dws->prev_chip = chip;
+	}
+
+	/* Re-arm before starting the channels: the rx/tx callbacks complete
+	 * this when both directions finish. */
+	INIT_COMPLETION(dws->xfer_completion);
+
+	if (transfer->tx_buf != NULL) {
+		dws->state |= TXBUSY;
+		if (rk29_dma_config(dws->tx_dmach, 1)) {
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto err_out;
+		}
+
+		iRet = rk29_dma_enqueue(dws->tx_dmach, (void *)dws,
+					transfer->tx_dma, transfer->len);
+		if (iRet) {
+			dev_err(&dws->master->dev, "function: %s, line: %d, iRet: %d(dws->tx_dmach: %d, transfer->tx_dma: 0x%x)\n", __FUNCTION__, __LINE__, iRet, 
+				dws->tx_dmach, (unsigned int)transfer->tx_dma);
+			goto err_out;
+		}
+
+		if (rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_START)) {
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto err_out;
+		}
+	}
+
+	if (transfer->rx_buf != NULL) {
+		dws->state |= RXBUSY;
+		if (rk29_dma_config(dws->rx_dmach, 1)) {
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto err_out;
+		}
+
+		iRet = rk29_dma_enqueue(dws->rx_dmach, (void *)dws,
+					transfer->rx_dma, transfer->len);
+		if (iRet) {
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto err_out;
+		}
+
+		if (rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_START)) {
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto err_out;
+		}
+	}
+
+	/* millisecs to xfer 'len' bytes @ 'cur_speed' */
+	ms = transfer->len * 8 * 1000 / dws->cur_chip->speed_hz;
+	ms += 10;
+
+	/* On timeout, flush whichever channel is still busy and fall through
+	 * to NEXT_TRANSFER so the message still makes forward progress. */
+	val = msecs_to_jiffies(ms) + 500;
+	if (!wait_for_completion_timeout(&dws->xfer_completion, val)) {
+		if (transfer->rx_buf != NULL && (dws->state & RXBUSY)) {
+			rk29_dma_ctrl(dws->rx_dmach, RK29_DMAOP_FLUSH);
+			dws->state &= ~RXBUSY;
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto NEXT_TRANSFER;
+		}
+		if (transfer->tx_buf != NULL && (dws->state & TXBUSY)) {
+			rk29_dma_ctrl(dws->tx_dmach, RK29_DMAOP_FLUSH);
+			dws->state &= ~TXBUSY;
+			dev_err(&dws->master->dev, "function: %s, line: %d\n", __FUNCTION__, __LINE__);
+			goto NEXT_TRANSFER;
+		}
+	}
+
+	wait_till_not_busy(dws);
+
+NEXT_TRANSFER:
+	/* Update total byte transfered return count actual bytes read */
+	dws->cur_msg->actual_length += dws->len;
+
+	/* Move to next transfer */
+	dws->cur_msg->state = next_transfer(dws);
+
+	/* Handle end of message */
+	if (dws->cur_msg->state == DONE_STATE) {
+		dws->cur_msg->status = 0;
+		giveback(dws);
+	} else
+		/* Recurse once per remaining transfer in this message. */
+		dma_transfer(dws);
+
+	return;
+
+err_out:
+	giveback(dws);
+	return;
+
+}
+\r
static void pump_messages(struct work_struct *work)\r
{\r
struct rk29xx_spi *dws =\r
\r
DBG(KERN_INFO "pump_messages\n");\r
\r
- while (!acquire_dma(dws))\r
- msleep(10);\r
-\r
/* Lock queue and check for queue work */\r
spin_lock_irqsave(&dws->lock, flags);\r
if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {\r
dws->prev_chip = NULL; //ÿ¸öpump messageÊ±Ç¿ÖÆ¸üÐÂcs dxj\r
\r
/* Mark as busy and launch transfers */\r
- tasklet_schedule(&dws->pump_transfers);\r
+ if(dws->cur_msg->is_dma_mapped && dws->cur_transfer->len > DMA_MIN_BYTES) {\r
+ dws->busy = 1;\r
+ spin_unlock_irqrestore(&dws->lock, flags);\r
+ dma_transfer(dws);\r
+ return;\r
+ }\r
+ else {\r
+ tasklet_schedule(&dws->pump_transfers);\r
+ }\r
\r
dws->busy = 1;\r
spin_unlock_irqrestore(&dws->lock, flags);\r
if (cs_change)\r
dws->prev_chip = chip;\r
\r
- if (dws->dma_mapped && transfer->len > DMA_MIN_BYTES) {\r
- dma_transfer(dws, transfer);\r
- }\r
- else {\r
- if (mode)\r
- return do_full_transfer(dws);\r
- else\r
- return do_half_transfer(dws);\r
- }\r
+ if (mode)\r
+ return do_full_transfer(dws);\r
+ else\r
+ return do_half_transfer(dws); \r
\r
early_exit:\r
\r
chip->tmode = 0; /* Tx & Rx */\r
/* Default SPI mode is SCPOL = 0, SCPH = 0 */\r
chip->cr0 = (spi_dfs << SPI_DFS_OFFSET)\r
+ | (SPI_HALF_WORLD_OFF << SPI_HALF_WORLD_TX_OFFSET)\r
+ | (SPI_SSN_DELAY_ONE << SPI_SSN_DELAY_OFFSET)\r
| (chip->type << SPI_FRF_OFFSET)\r
| (spi->mode << SPI_MODE_OFFSET)\r
| (chip->tmode << SPI_TMOD_OFFSET);\r
dws->run = QUEUE_STOPPED;\r
dws->busy = 0;\r
\r
+ init_completion(&dws->xfer_completion);\r
+\r
tasklet_init(&dws->pump_transfers,\r
pump_transfers, (unsigned long)dws);\r
\r
\r
ret =rk29xx_spim_cpufreq_register(dws);\r
if (ret < 0) {\r
- printk(KERN_ERR"rk29xx spim failed to init cpufreq support\n");\r
+ dev_err(&master->dev, "rk29xx spim failed to init cpufreq support\n");\r
goto err_queue_alloc;\r
}\r
DBG(KERN_INFO "rk29xx_spim: driver initialized\n");\r
rk29xx_spim_cpufreq_deregister(dws);\r
mrst_spi_debugfs_remove(dws);\r
\r
+ release_dma(dws);\r
+\r
/* Remove the queue */\r
status = destroy_queue(dws);\r
if (status != 0)\r