/*
 * dw_spi.c - DesignWare SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <linux/spi/dw_spi.h>
#include <linux/spi/spi.h>

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

#define START_STATE	((void *)0)
#define RUNNING_STATE	((void *)1)
#define DONE_STATE	((void *)2)
#define ERROR_STATE	((void *)-1)

#define QUEUE_RUNNING	0
#define QUEUE_STOPPED	1

#define MRST_SPI_DEASSERT	0
#define MRST_SPI_ASSERT		1

/* Slave spi_dev related */
struct chip_data {
	u16 cr0;
	u8 cs;			/* chip select pin */
	u8 n_bytes;		/* current is a 1/2/4 byte op */
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u32 dma_width;
	u32 rx_threshold;
	u32 tx_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	int (*write)(struct dw_spi *dws);
	int (*read)(struct dw_spi *dws);
	void (*cs_control)(u32 command);
};

#ifdef CONFIG_DEBUG_FS
static int spi_show_regs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

#define SPI_REGS_BUFSIZE	1024
static ssize_t spi_show_regs(struct file *file, char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct dw_spi *dws;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	dws = file->private_data;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"MRST SPI0 registers:\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, ctrl0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, ctrl1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, ssienr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, ser));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, baudr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, txfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, rxfltr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, txflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, rxflr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, sr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, imr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, isr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, dmacr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, dmatdlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, dmardlr));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations mrst_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= spi_show_regs_open,
	.read		= spi_show_regs,
};

static int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	dws->debugfs = debugfs_create_dir("mrst_spi", NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &mrst_spi_regs_ops);
	return 0;
}

static void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
	if (dws->debugfs)
		debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int mrst_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

static void wait_till_not_busy(struct dw_spi *dws)
{
	unsigned long end = jiffies + 1 + usecs_to_jiffies(1000);

	while (time_before(jiffies, end)) {
		if (!(dw_readw(dws, sr) & SR_BUSY))
			return;
	}
	dev_err(&dws->master->dev,
		"DW SPI: Status keeps busy for 1000us after a read/write!\n");
}

static void flush(struct dw_spi *dws)
{
	while (dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		dw_readw(dws, dr);

	wait_till_not_busy(dws);
}

static int null_writer(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;
	dw_writew(dws, dr, 0);
	dws->tx += n_bytes;

	wait_till_not_busy(dws);
	return 1;
}

static int null_reader(struct dw_spi *dws)
{
	u8 n_bytes = dws->n_bytes;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		dw_readw(dws, dr);
		dws->rx += n_bytes;
	}
	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}

static int u8_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u8 *)(dws->tx));
	++dws->tx;

	wait_till_not_busy(dws);
	return 1;
}

static int u8_reader(struct dw_spi *dws)
{
	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		*(u8 *)(dws->rx) = dw_readw(dws, dr);
		++dws->rx;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}

static int u16_writer(struct dw_spi *dws)
{
	if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
		|| (dws->tx == dws->tx_end))
		return 0;

	dw_writew(dws, dr, *(u16 *)(dws->tx));
	dws->tx += 2;

	wait_till_not_busy(dws);
	return 1;
}

static int u16_reader(struct dw_spi *dws)
{
	u16 temp;

	while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
		&& (dws->rx < dws->rx_end)) {
		temp = dw_readw(dws, dr);
		*(u16 *)(dws->rx) = temp;
		dws->rx += 2;
	}

	wait_till_not_busy(dws);
	return dws->rx == dws->rx_end;
}

static void *next_transfer(struct dw_spi *dws)
{
	struct spi_message *msg = dws->cur_msg;
	struct spi_transfer *trans = dws->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		dws->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}

/*
 * Note: the protocol driver is expected to have prepared dma-capable
 * memory beforehand; this function just needs to translate the virtual
 * address to a physical one.
 */
static int map_dma_buffers(struct dw_spi *dws)
{
	if (!dws->cur_msg->is_dma_mapped || !dws->dma_inited
		|| !dws->cur_chip->enable_dma)
		return 0;

	if (dws->cur_transfer->tx_dma)
		dws->tx_dma = dws->cur_transfer->tx_dma;

	if (dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->cur_transfer->rx_dma;

	return 1;
}
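
/*
 * Client-side sketch (illustrative, not part of this driver): for
 * map_dma_buffers() to report a DMA-capable transfer, a protocol driver
 * has to hand in already-mapped buffers and set enable_dma in its chip
 * data. Buffer names and the device pointer below are hypothetical; note
 * that dma_transfer() further down is still a stub:
 *
 *	struct spi_transfer t = {
 *		.tx_buf = txbuf,
 *		.rx_buf = rxbuf,
 *		.len    = len,
 *	};
 *	struct spi_message m;
 *
 *	t.tx_dma = dma_map_single(dev, txbuf, len, DMA_TO_DEVICE);
 *	t.rx_dma = dma_map_single(dev, rxbuf, len, DMA_FROM_DEVICE);
 *	spi_message_init(&m);
 *	m.is_dma_mapped = 1;
 *	spi_message_add_tail(&t, &m);
 *	spi_async(spi, &m);
 */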

/* Caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct dw_spi *dws)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&dws->lock, flags);
	msg = dws->cur_msg;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->prev_chip = dws->cur_chip;
	dws->cur_chip = NULL;
	dws->dma_mapped = 0;
	queue_work(dws->workqueue, &dws->pump_messages);
	spin_unlock_irqrestore(&dws->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	if (!last_transfer->cs_change && dws->cs_control)
		dws->cs_control(MRST_SPI_DEASSERT);

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);
}

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	/* Stop and reset hw */
	flush(dws);
	spi_enable_chip(dws, 0);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&dws->pump_transfers);
}

static void transfer_complete(struct dw_spi *dws)
{
	/* Update total bytes transferred; the count reflects actual bytes read */
	dws->cur_msg->actual_length += dws->len;

	/* Move to next transfer */
	dws->cur_msg->state = next_transfer(dws);

	/* Handle end of message */
	if (dws->cur_msg->state == DONE_STATE) {
		dws->cur_msg->status = 0;
		giveback(dws);
	} else
		tasklet_schedule(&dws->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status, irq_mask = 0x3f;
	u32 int_level = dws->fifo_len / 2;
	u32 left;

	irq_status = dw_readw(dws, isr) & irq_mask;
	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readw(dws, txoicr);
		dw_readw(dws, rxoicr);
		dw_readw(dws, rxuicr);
		int_error_stop(dws, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);

		left = (dws->tx_end - dws->tx) / dws->n_bytes;
		left = (left > int_level) ? int_level : left;

		while (left--)
			dws->write(dws);
		dws->read(dws);

		/* Re-enable the IRQ if there is still data left to tx */
		if (dws->tx_end > dws->tx)
			spi_umask_intr(dws, SPI_INT_TXEI);
		else
			transfer_complete(dws);
	}

	return IRQ_HANDLED;
}

static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct dw_spi *dws = dev_id;
	u16 irq_status, irq_mask = 0x3f;

	irq_status = dw_readw(dws, isr) & irq_mask;
	if (!irq_status)
		return IRQ_NONE;

	if (!dws->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		/* Never fail */
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called inside pump_transfers() */
static void poll_transfer(struct dw_spi *dws)
{
	while (dws->write(dws))
		dws->read(dws);

	transfer_complete(dws);
}

static void dma_transfer(struct dw_spi *dws, int cs_change)
{
}

static void pump_transfers(unsigned long data)
{
	struct dw_spi *dws = (struct dw_spi *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct spi_device *spi = NULL;
	struct chip_data *chip = NULL;
	u8 bits = 0;
	u8 imask = 0;
	u8 cs_change = 0;
	u16 txint_level = 0;
	u16 clk_div = 0;
	u32 speed = 0;
	u32 cr0 = 0;

	/* Get current state information */
	message = dws->cur_msg;
	transfer = dws->cur_transfer;
	chip = dws->cur_chip;
	spi = message->spi;

	if (unlikely(!chip->clk_div))
		chip->clk_div = dws->max_freq / chip->speed_hz;

	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		goto early_exit;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		goto early_exit;
	}

	/* Delay if requested at end of transfer */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);
	}

	dws->n_bytes = chip->n_bytes;
	dws->dma_width = chip->dma_width;
	dws->cs_control = chip->cs_control;

	dws->rx_dma = transfer->rx_dma;
	dws->tx_dma = transfer->tx_dma;
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->write = dws->tx ? chip->write : null_writer;
	dws->read = dws->rx ? chip->read : null_reader;
	dws->cs_change = transfer->cs_change;
	dws->len = dws->cur_transfer->len;
	if (chip != dws->prev_chip)
		cs_change = 1;

	cr0 = chip->cr0;

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz) {
		speed = chip->speed_hz;

		if (transfer->speed_hz != speed) {
			speed = transfer->speed_hz;
			if (speed > dws->max_freq) {
				printk(KERN_ERR "MRST SPI0: unsupported "
					"freq: %dHz\n", speed);
				message->status = -EIO;
				goto early_exit;
			}

			/* clk_div doesn't support odd numbers */
			clk_div = dws->max_freq / speed;
			clk_div = (clk_div + 1) & 0xfffe;

			chip->speed_hz = speed;
			chip->clk_div = clk_div;
		}
	}
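
	/*
	 * Worked example of the divider rounding above (numbers are
	 * hypothetical, not taken from any board spec): with max_freq of
	 * 50 MHz and a requested transfer speed of 10 MHz, the integer
	 * divider is 50000000 / 10000000 = 5; since odd dividers are not
	 * supported, (5 + 1) & 0xfffe rounds it to 6, giving an effective
	 * clock of 50 MHz / 6, roughly 8.33 MHz.
	 */
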
	if (transfer->bits_per_word) {
		bits = transfer->bits_per_word;

		switch (bits) {
		case 8:
			dws->n_bytes = 1;
			dws->dma_width = 1;
			dws->read = (dws->read != null_reader) ?
					u8_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u8_writer : null_writer;
			break;
		case 16:
			dws->n_bytes = 2;
			dws->dma_width = 2;
			dws->read = (dws->read != null_reader) ?
					u16_reader : null_reader;
			dws->write = (dws->write != null_writer) ?
					u16_writer : null_writer;
			break;
		default:
			printk(KERN_ERR "MRST SPI0: unsupported bits: "
				"%db\n", bits);
			message->status = -EIO;
			goto early_exit;
		}

		cr0 = (bits - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);
	}
	message->state = RUNNING_STATE;

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (dws->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	/* Check if current transfer is a DMA transaction */
	dws->dma_mapped = map_dma_buffers(dws);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (!dws->dma_mapped && !chip->poll_mode) {
		int templen = dws->len / dws->n_bytes;
		txint_level = dws->fifo_len / 2;
		txint_level = (templen > txint_level) ? txint_level : templen;

		imask |= SPI_INT_TXEI;
		dws->transfer_handler = interrupt_transfer;
	}
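
	/*
	 * Illustrative numbers (hypothetical): for a detected fifo_len of 40
	 * and a 12-byte transfer at 8 bits per word, templen = 12 / 1 = 12,
	 * so txint_level = min(40 / 2, 12) = 12 ends up in TXFTLR below;
	 * for a 256-byte transfer it would be capped at 20, half the FIFO.
	 */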

	/*
	 * Reprogram registers only if
	 *	1. chip select changes
	 *	2. clk_div is changed
	 *	3. control value changes
	 */
	if (dw_readw(dws, ctrl0) != cr0 || cs_change || clk_div || imask) {
		spi_enable_chip(dws, 0);

		if (dw_readw(dws, ctrl0) != cr0)
			dw_writew(dws, ctrl0, cr0);

		spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
		spi_chip_sel(dws, spi->chip_select);

		/* Set the interrupt mask; for poll mode just disable all interrupts */
		spi_mask_intr(dws, 0xff);
		if (imask)
			spi_umask_intr(dws, imask);
		if (txint_level)
			dw_writew(dws, txfltr, txint_level);

		spi_enable_chip(dws, 1);
		if (cs_change)
			dws->prev_chip = chip;
	}

	if (dws->dma_mapped)
		dma_transfer(dws, cs_change);

	if (chip->poll_mode)
		poll_transfer(dws);

	return;

early_exit:
	giveback(dws);
	return;
}

static void pump_messages(struct work_struct *work)
{
	struct dw_spi *dws =
		container_of(work, struct dw_spi, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&dws->lock, flags);
	if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
		dws->busy = 0;
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (dws->cur_msg) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return;
	}

	/* Extract head of queue */
	dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
	list_del_init(&dws->cur_msg->queue);

	/* Initial message state */
	dws->cur_msg->state = START_STATE;
	dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);
	dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&dws->pump_transfers);

	dws->busy = 1;
	spin_unlock_irqrestore(&dws->lock, flags);
}

/* An spi_device uses this to queue its spi_msg */
static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct dw_spi *dws = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &dws->queue);

	if (dws->run == QUEUE_RUNNING && !dws->busy) {

		if (dws->cur_transfer || dws->cur_msg)
			queue_work(dws->workqueue,
					&dws->pump_messages);
		else {
			/* If no other data transaction in flight, just go */
			spin_unlock_irqrestore(&dws->lock, flags);
			pump_messages(&dws->pump_messages);
			return 0;
		}
	}

	spin_unlock_irqrestore(&dws->lock, flags);
	return 0;
}
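
/*
 * Client-side sketch (illustrative, not part of this driver): a protocol
 * driver never calls dw_spi_transfer() directly; it builds an spi_message
 * and submits it through the SPI core, which reaches this function via
 * master->transfer. Buffer names below are made up:
 *
 *	struct spi_transfer t = {
 *		.tx_buf = cmd_buf,
 *		.len    = sizeof(cmd_buf),
 *	};
 *	struct spi_message m;
 *
 *	spi_message_init(&m);
 *	spi_message_add_tail(&t, &m);
 *	err = spi_sync(spi, &m);
 *
 * spi_async() would queue the same message without blocking.
 */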

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;

	if (spi->bits_per_word != 8 && spi->bits_per_word != 16)
		return -EINVAL;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;

		chip->rx_threshold = 0;
		chip->tx_threshold = 0;

		chip->enable_dma = chip_info->enable_dma;
	}

	if (spi->bits_per_word <= 8) {
		chip->n_bytes = 1;
		chip->dma_width = 1;
		chip->read = u8_reader;
		chip->write = u8_writer;
	} else if (spi->bits_per_word <= 16) {
		chip->n_bytes = 2;
		chip->dma_width = 2;
		chip->read = u16_reader;
		chip->write = u16_writer;
	} else {
		/* Never take the >16-bit case for the MRST SPI controller */
		dev_err(&spi->dev, "invalid wordsize\n");
		return -EINVAL;
	}
	chip->bits_per_word = spi->bits_per_word;

	if (!spi->max_speed_hz) {
		dev_err(&spi->dev, "No max speed HZ parameter\n");
		return -EINVAL;
	}
	chip->speed_hz = spi->max_speed_hz;

	chip->tmode = 0; /* Tx & Rx */
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	chip->cr0 = (chip->bits_per_word - 1)
			| (chip->type << SPI_FRF_OFFSET)
			| (spi->mode << SPI_MODE_OFFSET)
			| (chip->tmode << SPI_TMOD_OFFSET);

	spi_set_ctldata(spi, chip);
	return 0;
}
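
/*
 * Board-side sketch (illustrative): per-device tuning reaches dw_spi_setup()
 * through spi_board_info.controller_data. The values and the cs_control
 * helper below are hypothetical; struct dw_spi_chip and SSI_MOTO_SPI come
 * from <linux/spi/dw_spi.h>:
 *
 *	static struct dw_spi_chip my_spi_chip = {
 *		.poll_mode  = 1,		use polled PIO, no IRQ
 *		.type       = SSI_MOTO_SPI,	Motorola SPI frame format
 *		.enable_dma = 0,
 *		.cs_control = my_cs_control,	optional chipselect hook
 *	};
 *
 *	static struct spi_board_info board_info = {
 *		.modalias        = "my_spi_device",
 *		.max_speed_hz    = 1000000,
 *		.bus_num         = 0,
 *		.chip_select     = 0,
 *		.controller_data = &my_spi_chip,
 *	};
 */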

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);
	kfree(chip);
}

static int __devinit init_queue(struct dw_spi *dws)
{
	INIT_LIST_HEAD(&dws->queue);
	spin_lock_init(&dws->lock);

	dws->run = QUEUE_STOPPED;
	dws->busy = 0;

	tasklet_init(&dws->pump_transfers,
			pump_transfers, (unsigned long)dws);

	INIT_WORK(&dws->pump_messages, pump_messages);
	dws->workqueue = create_singlethread_workqueue(
					dev_name(dws->master->dev.parent));
	if (dws->workqueue == NULL)
		return -EBUSY;

	return 0;
}

static int start_queue(struct dw_spi *dws)
{
	unsigned long flags;

	spin_lock_irqsave(&dws->lock, flags);

	if (dws->run == QUEUE_RUNNING || dws->busy) {
		spin_unlock_irqrestore(&dws->lock, flags);
		return -EBUSY;
	}

	dws->run = QUEUE_RUNNING;
	dws->cur_msg = NULL;
	dws->cur_transfer = NULL;
	dws->cur_chip = NULL;
	dws->prev_chip = NULL;
	spin_unlock_irqrestore(&dws->lock, flags);

	queue_work(dws->workqueue, &dws->pump_messages);

	return 0;
}

static int stop_queue(struct dw_spi *dws)
{
	unsigned long flags;
	unsigned limit = 50;
	int status = 0;

	spin_lock_irqsave(&dws->lock, flags);
	dws->run = QUEUE_STOPPED;
	while (!list_empty(&dws->queue) && dws->busy && limit--) {
		spin_unlock_irqrestore(&dws->lock, flags);
		msleep(10);
		spin_lock_irqsave(&dws->lock, flags);
	}

	if (!list_empty(&dws->queue) || dws->busy)
		status = -EBUSY;
	spin_unlock_irqrestore(&dws->lock, flags);

	return status;
}

static int destroy_queue(struct dw_spi *dws)
{
	int status;

	status = stop_queue(dws);
	if (status != 0)
		return status;
	destroy_workqueue(dws->workqueue);
	return 0;
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct dw_spi *dws)
{
	spi_enable_chip(dws, 0);
	spi_mask_intr(dws, 0xff);
	spi_enable_chip(dws, 1);
	flush(dws);

	/*
	 * Try to detect the FIFO depth if not set by interface driver;
	 * the depth can be from 2 to 256 per the HW spec
	 */
	if (!dws->fifo_len) {
		u32 fifo;
		for (fifo = 2; fifo <= 257; fifo++) {
			dw_writew(dws, txfltr, fifo);
			if (fifo != dw_readw(dws, txfltr))
				break;
		}

		dws->fifo_len = (fifo == 257) ? 0 : fifo;
		dw_writew(dws, txfltr, 0);
	}
}

int __devinit dw_spi_add_host(struct dw_spi *dws)
{
	struct spi_master *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dws->parent_dev, 0);
	if (!master) {
		ret = -ENOMEM;
		goto exit;
	}

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->prev_chip = NULL;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
			"dw_spi", dws);
	if (ret < 0) {
		dev_err(&master->dev, "cannot get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA;
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->cleanup = dw_spi_cleanup;
	master->setup = dw_spi_setup;
	master->transfer = dw_spi_transfer;

	dws->dma_inited = 0;

	/* Basic HW init */
	spi_hw_init(dws);

	/* Initialize and start the queue */
	ret = init_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem initializing queue\n");
		goto err_diable_hw;
	}
	ret = start_queue(dws);
	if (ret) {
		dev_err(&master->dev, "problem starting queue\n");
		goto err_diable_hw;
	}

	spi_master_set_devdata(master, dws);
	ret = spi_register_master(master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_queue_alloc;
	}

	mrst_spi_debugfs_init(dws);
	return 0;

err_queue_alloc:
	destroy_queue(dws);
err_diable_hw:
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, dws);
err_free_master:
	spi_master_put(master);
exit:
	return ret;
}
EXPORT_SYMBOL(dw_spi_add_host);
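
/*
 * Interface-driver sketch (illustrative): dw_spi_add_host() expects the bus
 * glue (PCI, MMIO, ...) to have filled in the struct dw_spi fields it
 * consumes above. Field names such as ->regs are assumed from
 * <linux/spi/dw_spi.h>; the concrete values here are made up:
 *
 *	dws->regs       = ioremap_nocache(mem_start, mem_len);
 *	dws->paddr      = mem_start;
 *	dws->parent_dev = &pdev->dev;
 *	dws->irq        = pdev->irq;
 *	dws->bus_num    = 0;
 *	dws->num_cs     = 4;
 *	dws->max_freq   = 25000000;
 *	dws->fifo_len   = 0;		left 0 so spi_hw_init() probes it
 *
 *	ret = dw_spi_add_host(dws);
 */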

void __devexit dw_spi_remove_host(struct dw_spi *dws)
{
	int status = 0;

	if (!dws)
		return;
	mrst_spi_debugfs_remove(dws);

	/* Remove the queue */
	status = destroy_queue(dws);
	if (status != 0)
		dev_err(&dws->master->dev, "dw_spi_remove: workqueue will not "
			"complete, message memory not freed\n");

	spi_enable_chip(dws, 0);
	/* Disable clk */
	spi_set_clk(dws, 0);
	free_irq(dws->irq, dws);

	/* Disconnect from the SPI framework */
	spi_unregister_master(dws->master);
}
EXPORT_SYMBOL(dw_spi_remove_host);

int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret = 0;

	ret = stop_queue(dws);
	if (ret)
		return ret;
	spi_enable_chip(dws, 0);
	spi_set_clk(dws, 0);
	return ret;
}
EXPORT_SYMBOL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(dws);
	ret = start_queue(dws);
	if (ret)
		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");