2 * bcm63xx_udc.c -- BCM63xx UDC high/full speed USB device controller
4 * Copyright (C) 2012 Kevin Cernekee <cernekee@gmail.com>
5 * Copyright (C) 2012 Broadcom Corporation
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
13 #include <linux/bitops.h>
14 #include <linux/bug.h>
15 #include <linux/clk.h>
16 #include <linux/compiler.h>
17 #include <linux/debugfs.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/dma-mapping.h>
21 #include <linux/errno.h>
22 #include <linux/init.h>
23 #include <linux/interrupt.h>
24 #include <linux/ioport.h>
25 #include <linux/kconfig.h>
26 #include <linux/kernel.h>
27 #include <linux/list.h>
28 #include <linux/module.h>
29 #include <linux/moduleparam.h>
30 #include <linux/platform_device.h>
31 #include <linux/sched.h>
32 #include <linux/seq_file.h>
33 #include <linux/slab.h>
34 #include <linux/timer.h>
35 #include <linux/usb/ch9.h>
36 #include <linux/usb/gadget.h>
37 #include <linux/workqueue.h>
39 #include <bcm63xx_cpu.h>
40 #include <bcm63xx_iudma.h>
41 #include <bcm63xx_dev_usb_usbd.h>
42 #include <bcm63xx_io.h>
43 #include <bcm63xx_regs.h>
45 #define DRV_MODULE_NAME "bcm63xx_udc"
47 static const char bcm63xx_ep0name[] = "ep0";
48 static const char *const bcm63xx_ep_name[] = {
50 "ep1in-bulk", "ep2out-bulk", "ep3in-int", "ep4out-int",
53 static bool use_fullspeed;
54 module_param(use_fullspeed, bool, S_IRUGO);
55 MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
58 * RX IRQ coalescing options:
60 * false (default) - one IRQ per DATAx packet. Slow but reliable. The
61 * driver is able to pass the "testusb" suite and recover from conditions like:
63 * 1) Device queues up a 2048-byte RX IUDMA transaction on an OUT bulk ep
64 * 2) Host sends 512 bytes of data
65 * 3) Host decides to reconfigure the device and sends SET_INTERFACE
66 * 4) Device shuts down the endpoint and cancels the RX transaction
68 * true - one IRQ per transfer, for transfers <= 2048B. Generates
69 * considerably fewer IRQs, but error recovery is less robust. Does not
70 * reliably pass "testusb".
72 * TX always uses coalescing, because we can cancel partially complete TX
73 * transfers by repeatedly flushing the FIFO. The hardware doesn't allow
76 static bool irq_coalesce;
77 module_param(irq_coalesce, bool, S_IRUGO);
78 MODULE_PARM_DESC(irq_coalesce, "take one IRQ per RX transfer");
80 #define BCM63XX_NUM_EP 5
81 #define BCM63XX_NUM_IUDMA 6
82 #define BCM63XX_NUM_FIFO_PAIRS 3
84 #define IUDMA_RESET_TIMEOUT_US 10000
86 #define IUDMA_EP0_RXCHAN 0
87 #define IUDMA_EP0_TXCHAN 1
89 #define IUDMA_MAX_FRAGMENT 2048
90 #define BCM63XX_MAX_CTRL_PKT 64
92 #define BCMEP_CTRL 0x00
93 #define BCMEP_ISOC 0x01
94 #define BCMEP_BULK 0x02
95 #define BCMEP_INTR 0x03
97 #define BCMEP_OUT 0x00
100 #define BCM63XX_SPD_FULL 1
101 #define BCM63XX_SPD_HIGH 0
103 #define IUDMA_DMAC_OFFSET 0x200
104 #define IUDMA_DMAS_OFFSET 0x400
106 enum bcm63xx_ep0_state {
109 EP0_IN_DATA_PHASE_SETUP,
110 EP0_IN_DATA_PHASE_COMPLETE,
111 EP0_OUT_DATA_PHASE_SETUP,
112 EP0_OUT_DATA_PHASE_COMPLETE,
113 EP0_OUT_STATUS_PHASE,
114 EP0_IN_FAKE_STATUS_PHASE,
118 static const char __maybe_unused bcm63xx_ep0_state_names[][32] = {
121 "IN_DATA_PHASE_SETUP",
122 "IN_DATA_PHASE_COMPLETE",
123 "OUT_DATA_PHASE_SETUP",
124 "OUT_DATA_PHASE_COMPLETE",
126 "IN_FAKE_STATUS_PHASE",
131 * struct iudma_ch_cfg - Static configuration for an IUDMA channel.
132 * @ep_num: USB endpoint number.
133 * @n_bds: Number of buffer descriptors in the ring.
134 * @ep_type: Endpoint type (control, bulk, interrupt).
135 * @dir: Direction (in, out).
136 * @n_fifo_slots: Number of FIFO entries to allocate for this channel.
137 * @max_pkt_hs: Maximum packet size in high speed mode.
138 * @max_pkt_fs: Maximum packet size in full speed mode.
140 struct iudma_ch_cfg {
150 static const struct iudma_ch_cfg iudma_defaults[] = {
152 /* This controller was designed to support a CDC/RNDIS application.
153 It may be possible to reconfigure some of the endpoints, but
154 the hardware limitations (FIFO sizing and number of DMA channels)
155 may significantly impact flexibility and/or stability. Change
156 these values at your own risk.
158 ep_num ep_type n_fifo_slots max_pkt_fs
159 idx | n_bds | dir | max_pkt_hs |
161 [0] = { -1, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
162 [1] = { 0, 4, BCMEP_CTRL, BCMEP_OUT, 32, 64, 64 },
163 [2] = { 2, 16, BCMEP_BULK, BCMEP_OUT, 128, 512, 64 },
164 [3] = { 1, 16, BCMEP_BULK, BCMEP_IN, 128, 512, 64 },
165 [4] = { 4, 4, BCMEP_INTR, BCMEP_OUT, 32, 64, 64 },
166 [5] = { 3, 4, BCMEP_INTR, BCMEP_IN, 32, 64, 64 },
172 * struct iudma_ch - Represents the current state of a single IUDMA channel.
173 * @ch_idx: IUDMA channel index (0 to BCM63XX_NUM_IUDMA-1).
174 * @ep_num: USB endpoint number. -1 for ep0 RX.
175 * @enabled: Whether bcm63xx_ep_enable() has been called.
176 * @max_pkt: "Chunk size" on the USB interface. Based on interface speed.
177 * @is_tx: true for TX, false for RX.
178 * @bep: Pointer to the associated endpoint. NULL for ep0 RX.
179 * @udc: Reference to the device controller.
180 * @read_bd: Next buffer descriptor to reap from the hardware.
181 * @write_bd: Next BD available for a new packet.
182 * @end_bd: Points to the final BD in the ring.
183 * @n_bds_used: Number of BD entries currently occupied.
184 * @bd_ring: Base pointer to the BD ring.
185 * @bd_ring_dma: Physical (DMA) address of bd_ring.
186 * @n_bds: Total number of BDs in the ring.
188 * ep0 has two IUDMA channels (IUDMA_EP0_RXCHAN and IUDMA_EP0_TXCHAN), as it is
189 * bidirectional. The "struct usb_ep" associated with ep0 is for TX (IN)
192 * Each bulk/intr endpoint has a single IUDMA channel and a single
201 struct bcm63xx_ep *bep;
202 struct bcm63xx_udc *udc;
204 struct bcm_enet_desc *read_bd;
205 struct bcm_enet_desc *write_bd;
206 struct bcm_enet_desc *end_bd;
209 struct bcm_enet_desc *bd_ring;
210 dma_addr_t bd_ring_dma;
215 * struct bcm63xx_ep - Internal (driver) state of a single endpoint.
216 * @ep_num: USB endpoint number.
217 * @iudma: Pointer to IUDMA channel state.
218 * @ep: USB gadget layer representation of the EP.
219 * @udc: Reference to the device controller.
220 * @queue: Linked list of outstanding requests for this EP.
221 * @halted: 1 if the EP is stalled; 0 otherwise.
225 struct iudma_ch *iudma;
227 struct bcm63xx_udc *udc;
228 struct list_head queue;
233 * struct bcm63xx_req - Internal (driver) state of a single request.
234 * @queue: Links back to the EP's request list.
235 * @req: USB gadget layer representation of the request.
236 * @offset: Current byte offset into the data buffer (next byte to queue).
237 * @bd_bytes: Number of data bytes in outstanding BD entries.
238 * @iudma: IUDMA channel used for the request.
241 struct list_head queue; /* ep's requests */
242 struct usb_request req;
244 unsigned int bd_bytes;
245 struct iudma_ch *iudma;
249 * struct bcm63xx_udc - Driver/hardware private context.
250 * @lock: Spinlock to mediate access to this struct, and (most) HW regs.
251 * @dev: Generic Linux device structure.
252 * @pd: Platform data (board/port info).
253 * @usbd_clk: Clock descriptor for the USB device block.
254 * @usbh_clk: Clock descriptor for the USB host block.
255 * @gadget: USB slave device.
256 * @driver: Driver for USB slave devices.
257 * @usbd_regs: Base address of the USBD/USB20D block.
258 * @iudma_regs: Base address of the USBD's associated IUDMA block.
259 * @bep: Array of endpoints, including ep0.
260 * @iudma: Array of all IUDMA channels used by this controller.
261 * @cfg: USB configuration number, from SET_CONFIGURATION wValue.
262 * @iface: USB interface number, from SET_INTERFACE wIndex.
263 * @alt_iface: USB alt interface number, from SET_INTERFACE wValue.
264 * @ep0_ctrl_req: Request object for bcm63xx_udc-initiated ep0 transactions.
265 * @ep0_ctrl_buf: Data buffer for ep0_ctrl_req.
266 * @ep0state: Current state of the ep0 state machine.
267 * @ep0_wq: Workqueue struct used to wake up the ep0 state machine.
268 * @wedgemap: Bitmap of wedged endpoints.
269 * @ep0_req_reset: USB reset is pending.
270 * @ep0_req_set_cfg: Need to spoof a SET_CONFIGURATION packet.
271 * @ep0_req_set_iface: Need to spoof a SET_INTERFACE packet.
272 * @ep0_req_shutdown: Driver is shutting down; requesting ep0 to halt activity.
273 * @ep0_req_completed: ep0 request has completed; worker has not seen it yet.
274 * @ep0_reply: Pending reply from gadget driver.
275 * @ep0_request: Outstanding ep0 request.
276 * @debugfs_root: debugfs directory: /sys/kernel/debug/<DRV_MODULE_NAME>.
277 * @debugfs_usbd: debugfs file "usbd" for controller state.
278 * @debugfs_iudma: debugfs file "iudma" for IUDMA state.
284 struct bcm63xx_usbd_platform_data *pd;
285 struct clk *usbd_clk;
286 struct clk *usbh_clk;
288 struct usb_gadget gadget;
289 struct usb_gadget_driver *driver;
291 void __iomem *usbd_regs;
292 void __iomem *iudma_regs;
294 struct bcm63xx_ep bep[BCM63XX_NUM_EP];
295 struct iudma_ch iudma[BCM63XX_NUM_IUDMA];
301 struct bcm63xx_req ep0_ctrl_req;
305 struct work_struct ep0_wq;
307 unsigned long wedgemap;
309 unsigned ep0_req_reset:1;
310 unsigned ep0_req_set_cfg:1;
311 unsigned ep0_req_set_iface:1;
312 unsigned ep0_req_shutdown:1;
314 unsigned ep0_req_completed:1;
315 struct usb_request *ep0_reply;
316 struct usb_request *ep0_request;
318 struct dentry *debugfs_root;
319 struct dentry *debugfs_usbd;
320 struct dentry *debugfs_iudma;
323 static const struct usb_ep_ops bcm63xx_udc_ep_ops;
325 /***********************************************************************
326 * Convenience functions
327 ***********************************************************************/
329 static inline struct bcm63xx_udc *gadget_to_udc(struct usb_gadget *g)
331 return container_of(g, struct bcm63xx_udc, gadget);
334 static inline struct bcm63xx_ep *our_ep(struct usb_ep *ep)
336 return container_of(ep, struct bcm63xx_ep, ep);
339 static inline struct bcm63xx_req *our_req(struct usb_request *req)
341 return container_of(req, struct bcm63xx_req, req);
344 static inline u32 usbd_readl(struct bcm63xx_udc *udc, u32 off)
346 return bcm_readl(udc->usbd_regs + off);
349 static inline void usbd_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
351 bcm_writel(val, udc->usbd_regs + off);
354 static inline u32 usb_dma_readl(struct bcm63xx_udc *udc, u32 off)
356 return bcm_readl(udc->iudma_regs + off);
359 static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
361 bcm_writel(val, udc->iudma_regs + off);
364 static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
366 return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
367 (ENETDMA_CHAN_WIDTH * chan));
370 static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
373 bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
374 (ENETDMA_CHAN_WIDTH * chan));
377 static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
379 return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
380 (ENETDMA_CHAN_WIDTH * chan));
383 static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
386 bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
387 (ENETDMA_CHAN_WIDTH * chan));
390 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
393 clk_enable(udc->usbh_clk);
394 clk_enable(udc->usbd_clk);
397 clk_disable(udc->usbd_clk);
398 clk_disable(udc->usbh_clk);
402 /***********************************************************************
403 * Low-level IUDMA / FIFO operations
404 ***********************************************************************/
407 * bcm63xx_ep_dma_select - Helper function to set up the init_sel signal.
408 * @udc: Reference to the device controller.
409 * @idx: Desired init_sel value.
411 * The "init_sel" signal is used as a selection index for both endpoints
412 * and IUDMA channels. Since these do not map 1:1, the use of this signal
413 * depends on the context.
415 static void bcm63xx_ep_dma_select(struct bcm63xx_udc *udc, int idx)
417 u32 val = usbd_readl(udc, USBD_CONTROL_REG);
419 val &= ~USBD_CONTROL_INIT_SEL_MASK;
420 val |= idx << USBD_CONTROL_INIT_SEL_SHIFT;
421 usbd_writel(udc, val, USBD_CONTROL_REG);
425 * bcm63xx_set_stall - Enable/disable stall on one endpoint.
426 * @udc: Reference to the device controller.
427 * @bep: Endpoint on which to operate.
428 * @is_stalled: true to enable stall, false to disable.
430 * See notes in bcm63xx_update_wedge() regarding automatic clearing of
431 * halt/stall conditions.
433 static void bcm63xx_set_stall(struct bcm63xx_udc *udc, struct bcm63xx_ep *bep,
438 val = USBD_STALL_UPDATE_MASK |
439 (is_stalled ? USBD_STALL_ENABLE_MASK : 0) |
440 (bep->ep_num << USBD_STALL_EPNUM_SHIFT);
441 usbd_writel(udc, val, USBD_STALL_REG);
445 * bcm63xx_fifo_setup - (Re)initialize FIFO boundaries and settings.
446 * @udc: Reference to the device controller.
448 * These parameters depend on the USB link speed. Settings are
449 * per-IUDMA-channel-pair.
451 static void bcm63xx_fifo_setup(struct bcm63xx_udc *udc)
/* NOTE(review): this chunk appears to have lines dropped (unbalanced braces
 * and at least two usbd_writel() calls before the EPSIZE arguments below) --
 * verify against upstream bcm63xx_udc.c before building. */
453 int is_hs = udc->gadget.speed == USB_SPEED_HIGH;
454 u32 i, val, rx_fifo_slot, tx_fifo_slot;
456 /* set up FIFO boundaries and packet sizes; this is done in pairs */
457 rx_fifo_slot = tx_fifo_slot = 0;
/* channels come in RX/TX pairs; init_sel index is the pair number (i >> 1) */
458 for (i = 0; i < BCM63XX_NUM_IUDMA; i += 2) {
459 const struct iudma_ch_cfg *rx_cfg = &iudma_defaults[i];
460 const struct iudma_ch_cfg *tx_cfg = &iudma_defaults[i + 1];
462 bcm63xx_ep_dma_select(udc, i >> 1);
/* RX FIFO: pack inclusive [start, end] slot range into one register */
464 val = (rx_fifo_slot << USBD_RXFIFO_CONFIG_START_SHIFT) |
465 ((rx_fifo_slot + rx_cfg->n_fifo_slots - 1) <<
466 USBD_RXFIFO_CONFIG_END_SHIFT);
467 rx_fifo_slot += rx_cfg->n_fifo_slots;
468 usbd_writel(udc, val, USBD_RXFIFO_CONFIG_REG);
/* per-EP packet size depends on the negotiated link speed */
470 is_hs ? rx_cfg->max_pkt_hs : rx_cfg->max_pkt_fs,
471 USBD_RXFIFO_EPSIZE_REG);
/* TX FIFO: same layout as RX, independent slot allocator */
473 val = (tx_fifo_slot << USBD_TXFIFO_CONFIG_START_SHIFT) |
474 ((tx_fifo_slot + tx_cfg->n_fifo_slots - 1) <<
475 USBD_TXFIFO_CONFIG_END_SHIFT);
476 tx_fifo_slot += tx_cfg->n_fifo_slots;
477 usbd_writel(udc, val, USBD_TXFIFO_CONFIG_REG);
479 is_hs ? tx_cfg->max_pkt_hs : tx_cfg->max_pkt_fs,
480 USBD_TXFIFO_EPSIZE_REG);
/* read back to make sure the writes have posted */
482 usbd_readl(udc, USBD_TXFIFO_EPSIZE_REG);
487 * bcm63xx_fifo_reset_ep - Flush a single endpoint's FIFO.
488 * @udc: Reference to the device controller.
489 * @ep_num: Endpoint number.
491 static void bcm63xx_fifo_reset_ep(struct bcm63xx_udc *udc, int ep_num)
495 bcm63xx_ep_dma_select(udc, ep_num);
497 val = usbd_readl(udc, USBD_CONTROL_REG);
498 val |= USBD_CONTROL_FIFO_RESET_MASK;
499 usbd_writel(udc, val, USBD_CONTROL_REG);
500 usbd_readl(udc, USBD_CONTROL_REG);
504 * bcm63xx_fifo_reset - Flush all hardware FIFOs.
505 * @udc: Reference to the device controller.
507 static void bcm63xx_fifo_reset(struct bcm63xx_udc *udc)
511 for (i = 0; i < BCM63XX_NUM_FIFO_PAIRS; i++)
512 bcm63xx_fifo_reset_ep(udc, i);
516 * bcm63xx_ep_init - Initial (one-time) endpoint initialization.
517 * @udc: Reference to the device controller.
519 static void bcm63xx_ep_init(struct bcm63xx_udc *udc)
/* NOTE(review): declarations of i/val and (presumably) a skip for the
 * ep_num == -1 channel appear to be missing from this chunk -- verify
 * against upstream bcm63xx_udc.c. */
523 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
524 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
/* program the EP's type and owning DMA channel pair (i >> 1) */
529 bcm63xx_ep_dma_select(udc, cfg->ep_num);
530 val = (cfg->ep_type << USBD_EPNUM_TYPEMAP_TYPE_SHIFT) |
531 ((i >> 1) << USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT);
532 usbd_writel(udc, val, USBD_EPNUM_TYPEMAP_REG);
537 * bcm63xx_ep_setup - Configure per-endpoint settings.
538 * @udc: Reference to the device controller.
540 * This needs to be rerun if the speed/cfg/intf/altintf changes.
542 static void bcm63xx_ep_setup(struct bcm63xx_udc *udc)
/* NOTE(review): declarations (val, i) and some guard lines appear to be
 * missing from this chunk -- verify against upstream bcm63xx_udc.c. */
546 usbd_writel(udc, USBD_CSR_SETUPADDR_DEF, USBD_CSR_SETUPADDR_REG);
548 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
549 const struct iudma_ch_cfg *cfg = &iudma_defaults[i];
/* max packet size follows the negotiated link speed */
550 int max_pkt = udc->gadget.speed == USB_SPEED_HIGH ?
551 cfg->max_pkt_hs : cfg->max_pkt_fs;
552 int idx = cfg->ep_num;
554 udc->iudma[i].max_pkt = max_pkt;
558 udc->bep[idx].ep.maxpacket = max_pkt;
/* rebuild the per-EP CSR: log EP number, direction, type, and the
 * current cfg/iface/alt_iface selection from the host */
560 val = (idx << USBD_CSR_EP_LOG_SHIFT) |
561 (cfg->dir << USBD_CSR_EP_DIR_SHIFT) |
562 (cfg->ep_type << USBD_CSR_EP_TYPE_SHIFT) |
563 (udc->cfg << USBD_CSR_EP_CFG_SHIFT) |
564 (udc->iface << USBD_CSR_EP_IFACE_SHIFT) |
565 (udc->alt_iface << USBD_CSR_EP_ALTIFACE_SHIFT) |
566 (max_pkt << USBD_CSR_EP_MAXPKT_SHIFT);
567 usbd_writel(udc, val, USBD_CSR_EP_REG(idx));
572 * iudma_write - Queue a single IUDMA transaction.
573 * @udc: Reference to the device controller.
574 * @iudma: IUDMA channel to use.
575 * @breq: Request containing the transaction data.
577 * For RX IUDMA, this will queue a single buffer descriptor, as RX IUDMA
578 * does not honor SOP/EOP so the handling of multiple buffers is ambiguous.
579 * So iudma_write() may be called several times to fulfill a single
582 * For TX IUDMA, this can queue multiple buffer descriptors if needed.
584 static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
585 struct bcm63xx_req *breq)
/* NOTE(review): several structural lines (braces, loop header, dmaflags
 * declaration/reset, the bytes_left branch around the length setup) appear
 * to be missing from this chunk -- verify against upstream bcm63xx_udc.c. */
587 int first_bd = 1, last_bd = 0, extra_zero_pkt = 0;
588 unsigned int bytes_left = breq->req.length - breq->offset;
/* RX without coalescing: one BD per max_pkt so each DATAx packet IRQs;
 * otherwise BDs can carry up to one full IUDMA fragment */
589 const int max_bd_bytes = !irq_coalesce && !iudma->is_tx ?
590 iudma->max_pkt : IUDMA_MAX_FRAGMENT;
592 iudma->n_bds_used = 0;
/* exact multiple of max_pkt + req.zero: host needs a trailing ZLP */
596 if ((bytes_left % iudma->max_pkt == 0) && bytes_left && breq->req.zero)
600 struct bcm_enet_desc *d = iudma->write_bd;
602 unsigned int n_bytes;
/* last BD in the ring: set WRAP and restart at the ring base */
604 if (d == iudma->end_bd) {
605 dmaflags |= DMADESC_WRAP_MASK;
606 iudma->write_bd = iudma->bd_ring;
612 n_bytes = min_t(int, bytes_left, max_bd_bytes);
614 dmaflags |= n_bytes << DMADESC_LENGTH_SHIFT;
/* zero-length packet BD: nominal length 1 + the USB ZERO flag */
616 dmaflags |= (1 << DMADESC_LENGTH_SHIFT) |
617 DMADESC_USB_ZERO_MASK;
619 dmaflags |= DMADESC_OWNER_MASK;
621 dmaflags |= DMADESC_SOP_MASK;
626 * extra_zero_pkt forces one more iteration through the loop
627 * after all data is queued up, to send the zero packet
629 if (extra_zero_pkt && !bytes_left)
/* EOP on: any RX BD, a full ring, or the final data BD (no ZLP due) */
632 if (!iudma->is_tx || iudma->n_bds_used == iudma->n_bds ||
633 (n_bytes == bytes_left && !extra_zero_pkt)) {
635 dmaflags |= DMADESC_EOP_MASK;
638 d->address = breq->req.dma + breq->offset;
640 d->len_stat = dmaflags;
642 breq->offset += n_bytes;
643 breq->bd_bytes += n_bytes;
644 bytes_left -= n_bytes;
/* kick the channel; HW starts consuming the OWNER-set BDs */
647 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
648 ENETDMAC_CHANCFG_REG, iudma->ch_idx);
652 * iudma_read - Check for IUDMA buffer completion.
653 * @udc: Reference to the device controller.
654 * @iudma: IUDMA channel to use.
656 * This checks to see if ALL of the outstanding BDs on the DMA channel
657 * have been filled. If so, it returns the actual transfer length;
658 * otherwise it returns -EBUSY.
660 static int iudma_read(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
/* NOTE(review): some lines appear missing (dmaflags declaration, the
 * early-return/-EBUSY values, ring-advance of d, final return) -- verify
 * against upstream bcm63xx_udc.c. */
662 int i, actual_len = 0;
663 struct bcm_enet_desc *d = iudma->read_bd;
/* nothing outstanding on this channel */
665 if (!iudma->n_bds_used)
668 for (i = 0; i < iudma->n_bds_used; i++) {
671 dmaflags = d->len_stat;
/* hardware still owns this BD -> transfer not yet complete */
673 if (dmaflags & DMADESC_OWNER_MASK)
676 actual_len += (dmaflags & DMADESC_LENGTH_MASK) >>
677 DMADESC_LENGTH_SHIFT;
/* wrap the read cursor at the end of the ring */
678 if (d == iudma->end_bd)
/* all BDs reaped; channel is idle again */
685 iudma->n_bds_used = 0;
690 * iudma_reset_channel - Stop DMA on a single channel.
691 * @udc: Reference to the device controller.
692 * @iudma: IUDMA channel to reset.
694 static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
/* NOTE(review): several lines appear missing (udelay/timeout decrement in
 * the busy-wait, the timeout==0 error break, BD OWNER-bit clearing in the
 * ring sweep) -- verify against upstream bcm63xx_udc.c. */
696 int timeout = IUDMA_RESET_TIMEOUT_US;
697 struct bcm_enet_desc *d;
698 int ch_idx = iudma->ch_idx;
/* ep_num is -1 for the ep0 RX channel; clamp to FIFO 0 */
701 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
703 /* stop DMA, then wait for the hardware to wrap up */
704 usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
706 while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
707 ENETDMAC_CHANCFG_EN_MASK) {
710 /* repeatedly flush the FIFO data until the BD completes */
711 if (iudma->is_tx && iudma->ep_num >= 0)
712 bcm63xx_fifo_reset_ep(udc, iudma->ep_num);
715 dev_err(udc->dev, "can't reset IUDMA channel %d\n",
/* halfway through the timeout: escalate to a forced buffer halt */
719 if (timeout == IUDMA_RESET_TIMEOUT_US / 2) {
720 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
722 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
723 ENETDMAC_CHANCFG_REG, ch_idx);
/* ack every pending per-channel IRQ */
726 usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
728 /* don't leave "live" HW-owned entries for the next guy to step on */
729 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
/* rewind both cursors to an empty ring */
733 iudma->read_bd = iudma->write_bd = iudma->bd_ring;
734 iudma->n_bds_used = 0;
736 /* set up IRQs, UBUS burst size, and BD base for this channel */
737 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
738 ENETDMAC_IRMASK_REG, ch_idx);
739 usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
741 usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
742 usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
746 * iudma_init_channel - One-time IUDMA channel initialization.
747 * @udc: Reference to the device controller.
748 * @ch_idx: Channel to initialize.
750 static int iudma_init_channel(struct bcm63xx_udc *udc, unsigned int ch_idx)
/* NOTE(review): some lines appear missing (bep field wiring, the
 * allocation-failure return, the final return 0) -- verify against
 * upstream bcm63xx_udc.c. */
752 struct iudma_ch *iudma = &udc->iudma[ch_idx];
753 const struct iudma_ch_cfg *cfg = &iudma_defaults[ch_idx];
754 unsigned int n_bds = cfg->n_bds;
755 struct bcm63xx_ep *bep = NULL;
757 iudma->ep_num = cfg->ep_num;
758 iudma->ch_idx = ch_idx;
/* odd channel indices are TX (IN), even are RX (OUT) */
759 iudma->is_tx = !!(ch_idx & 0x01);
/* ep_num < 0 means ep0 RX, which has no struct usb_ep of its own */
760 if (iudma->ep_num >= 0) {
761 bep = &udc->bep[iudma->ep_num];
763 INIT_LIST_HEAD(&bep->queue);
769 /* ep0 is always active; others are controlled by the gadget driver */
770 if (iudma->ep_num <= 0)
771 iudma->enabled = true;
773 iudma->n_bds = n_bds;
/* devm-managed coherent ring; freed automatically on device teardown */
774 iudma->bd_ring = dmam_alloc_coherent(udc->dev,
775 n_bds * sizeof(struct bcm_enet_desc),
776 &iudma->bd_ring_dma, GFP_KERNEL);
779 iudma->end_bd = &iudma->bd_ring[n_bds - 1];
785 * iudma_init - One-time initialization of all IUDMA channels.
786 * @udc: Reference to the device controller.
788 * Enable DMA, flush channels, and enable global IUDMA IRQs.
790 static int iudma_init(struct bcm63xx_udc *udc)
/* NOTE(review): declarations (i, rc), the error-return on channel init
 * failure, and the final return appear missing from this chunk -- verify
 * against upstream bcm63xx_udc.c. */
794 usb_dma_writel(udc, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
796 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
797 rc = iudma_init_channel(udc, i);
800 iudma_reset_channel(udc, &udc->iudma[i]);
/* unmask global IRQs for all BCM63XX_NUM_IUDMA channels at once */
803 usb_dma_writel(udc, BIT(BCM63XX_NUM_IUDMA)-1, ENETDMA_GLB_IRQMASK_REG);
808 * iudma_uninit - Uninitialize IUDMA channels.
809 * @udc: Reference to the device controller.
811 * Kill global IUDMA IRQs, flush channels, and kill DMA.
813 static void iudma_uninit(struct bcm63xx_udc *udc)
817 usb_dma_writel(udc, 0, ENETDMA_GLB_IRQMASK_REG);
819 for (i = 0; i < BCM63XX_NUM_IUDMA; i++)
820 iudma_reset_channel(udc, &udc->iudma[i]);
822 usb_dma_writel(udc, 0, ENETDMA_CFG_REG);
825 /***********************************************************************
826 * Other low-level USBD operations
827 ***********************************************************************/
830 * bcm63xx_set_ctrl_irqs - Mask/unmask control path interrupts.
831 * @udc: Reference to the device controller.
832 * @enable_irqs: true to enable, false to disable.
834 static void bcm63xx_set_ctrl_irqs(struct bcm63xx_udc *udc, bool enable_irqs)
838 usbd_writel(udc, 0, USBD_STATUS_REG);
840 val = BIT(USBD_EVENT_IRQ_USB_RESET) |
841 BIT(USBD_EVENT_IRQ_SETUP) |
842 BIT(USBD_EVENT_IRQ_SETCFG) |
843 BIT(USBD_EVENT_IRQ_SETINTF) |
844 BIT(USBD_EVENT_IRQ_USB_LINK);
845 usbd_writel(udc, enable_irqs ? val : 0, USBD_EVENT_IRQ_MASK_REG);
846 usbd_writel(udc, val, USBD_EVENT_IRQ_STATUS_REG);
850 * bcm63xx_select_phy_mode - Select between USB device and host mode.
851 * @udc: Reference to the device controller.
852 * @is_device: true for device, false for host.
854 * This should probably be reworked to use the drivers/usb/otg
857 * By default, the AFE/pullups are disabled in device mode, until
858 * bcm63xx_select_pullup() is called.
860 static void bcm63xx_select_phy_mode(struct bcm63xx_udc *udc, bool is_device)
/* NOTE(review): the is_device/else selectors around the UTMI bit set/clear
 * pairs and the SWAP register update appear to be missing from this chunk
 * -- verify against upstream bcm63xx_udc.c. */
862 u32 val, portmask = BIT(udc->pd->port_no);
864 if (BCMCPU_IS_6328()) {
865 /* configure pinmux to sense VBUS signal */
866 val = bcm_gpio_readl(GPIO_PINMUX_OTHR_REG);
867 val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;
868 val |= is_device ? GPIO_PINMUX_OTHR_6328_USB_DEV :
869 GPIO_PINMUX_OTHR_6328_USB_HOST;
870 bcm_gpio_writel(val, GPIO_PINMUX_OTHR_REG);
/* device mode: set HOSTB + NODRIV for this port; host mode clears them.
 * NODRIV keeps the pullup off until bcm63xx_select_pullup() enables it. */
873 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
875 val |= (portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
876 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
878 val &= ~(portmask << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT);
879 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
881 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
/* route the port's data lines to the device block (or back to host) */
883 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_6368_REG);
885 val |= USBH_PRIV_SWAP_USBD_MASK;
887 val &= ~USBH_PRIV_SWAP_USBD_MASK;
888 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_SWAP_6368_REG);
892 * bcm63xx_select_pullup - Enable/disable the pullup on D+
893 * @udc: Reference to the device controller.
894 * @is_on: true to enable the pullup, false to disable.
896 * If the pullup is active, the host will sense a FS/HS device connected to
897 * the port. If the pullup is inactive, the host will think the USB
898 * device has been disconnected.
900 static void bcm63xx_select_pullup(struct bcm63xx_udc *udc, bool is_on)
902 u32 val, portmask = BIT(udc->pd->port_no);
904 val = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_UTMI_CTL_6368_REG);
906 val &= ~(portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
908 val |= (portmask << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT);
909 bcm_rset_writel(RSET_USBH_PRIV, val, USBH_PRIV_UTMI_CTL_6368_REG);
913 * bcm63xx_uninit_udc_hw - Shut down the hardware prior to driver removal.
914 * @udc: Reference to the device controller.
916 * This just masks the IUDMA IRQs and releases the clocks. It is assumed
917 * that bcm63xx_udc_stop() has already run, and the clocks are stopped.
919 static void bcm63xx_uninit_udc_hw(struct bcm63xx_udc *udc)
/* NOTE(review): a line appears to be missing between the two set_clocks()
 * calls -- presumably the iudma_uninit(udc) teardown; verify against
 * upstream bcm63xx_udc.c. */
921 set_clocks(udc, true);
923 set_clocks(udc, false);
/* release clock references taken in bcm63xx_init_udc_hw() */
925 clk_put(udc->usbd_clk);
926 clk_put(udc->usbh_clk);
930 * bcm63xx_init_udc_hw - Initialize the controller hardware and data structures.
931 * @udc: Reference to the device controller.
933 static int bcm63xx_init_udc_hw(struct bcm63xx_udc *udc)
/* NOTE(review): multiple lines appear to be missing throughout this
 * function (declarations of i/rc/val, -ENOMEM returns, clk_get error
 * returns, the rc check before the uninit path, final return) -- verify
 * against upstream bcm63xx_udc.c. */
938 udc->ep0_ctrl_buf = devm_kzalloc(udc->dev, BCM63XX_MAX_CTRL_PKT,
940 if (!udc->ep0_ctrl_buf)
/* build the gadget-visible endpoint list */
943 INIT_LIST_HEAD(&udc->gadget.ep_list);
944 for (i = 0; i < BCM63XX_NUM_EP; i++) {
945 struct bcm63xx_ep *bep = &udc->bep[i];
947 bep->ep.name = bcm63xx_ep_name[i];
949 bep->ep.ops = &bcm63xx_udc_ep_ops;
950 list_add_tail(&bep->ep.ep_list, &udc->gadget.ep_list);
952 bep->ep.maxpacket = BCM63XX_MAX_CTRL_PKT;
955 INIT_LIST_HEAD(&bep->queue);
/* ep0 is exposed via gadget.ep0, not via the generic ep_list */
958 udc->gadget.ep0 = &udc->bep[0].ep;
959 list_del(&udc->bep[0].ep.ep_list);
961 udc->gadget.speed = USB_SPEED_UNKNOWN;
962 udc->ep0state = EP0_SHUTDOWN;
964 udc->usbh_clk = clk_get(udc->dev, "usbh");
965 if (IS_ERR(udc->usbh_clk))
968 udc->usbd_clk = clk_get(udc->dev, "usbd");
969 if (IS_ERR(udc->usbd_clk)) {
/* drop the already-acquired host clock on the error path */
970 clk_put(udc->usbh_clk);
974 set_clocks(udc, true);
976 val = USBD_CONTROL_AUTO_CSRS_MASK |
977 USBD_CONTROL_DONE_CSRS_MASK |
978 (irq_coalesce ? USBD_CONTROL_RXZSCFG_MASK : 0);
979 usbd_writel(udc, val, USBD_CONTROL_REG);
981 val = USBD_STRAPS_APP_SELF_PWR_MASK |
982 USBD_STRAPS_APP_RAM_IF_MASK |
983 USBD_STRAPS_APP_CSRPRGSUP_MASK |
984 USBD_STRAPS_APP_8BITPHY_MASK |
985 USBD_STRAPS_APP_RMTWKUP_MASK;
/* strap the PHY for HS or FS-only per the module parameter / gadget cap */
987 if (udc->gadget.max_speed == USB_SPEED_HIGH)
988 val |= (BCM63XX_SPD_HIGH << USBD_STRAPS_SPEED_SHIFT);
990 val |= (BCM63XX_SPD_FULL << USBD_STRAPS_SPEED_SHIFT);
991 usbd_writel(udc, val, USBD_STRAPS_REG);
/* leave control IRQs masked until a gadget driver binds */
993 bcm63xx_set_ctrl_irqs(udc, false);
995 usbd_writel(udc, 0, USBD_EVENT_IRQ_CFG_LO_REG);
997 val = USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_ENUM_ON) |
998 USBD_EVENT_IRQ_CFG_FALLING(USBD_EVENT_IRQ_SET_CSRS);
999 usbd_writel(udc, val, USBD_EVENT_IRQ_CFG_HI_REG);
1001 rc = iudma_init(udc);
1002 set_clocks(udc, false);
1004 bcm63xx_uninit_udc_hw(udc);
1009 /***********************************************************************
1010 * Standard EP gadget operations
1011 ***********************************************************************/
1014 * bcm63xx_ep_enable - Enable one endpoint.
1015 * @ep: Endpoint to enable.
1016 * @desc: Contains max packet, direction, etc.
1018 * Most of the endpoint parameters are fixed in this controller, so there
1019 * isn't much for this function to do.
1021 static int bcm63xx_ep_enable(struct usb_ep *ep,
1022 const struct usb_endpoint_descriptor *desc)
/* NOTE(review): several lines appear missing (the -EINVAL return for the
 * ep0/bad-args guard, the -EINVAL for already-enabled, ep->desc
 * assignment, final return 0) -- verify against upstream bcm63xx_udc.c. */
1024 struct bcm63xx_ep *bep = our_ep(ep);
1025 struct bcm63xx_udc *udc = bep->udc;
1026 struct iudma_ch *iudma = bep->iudma;
1027 unsigned long flags;
/* ep0 is managed internally; gadget drivers may not enable it */
1029 if (!ep || !desc || ep->name == bcm63xx_ep0name)
1035 spin_lock_irqsave(&udc->lock, flags);
1036 if (iudma->enabled) {
1037 spin_unlock_irqrestore(&udc->lock, flags);
1041 iudma->enabled = true;
/* a disabled EP must not have requests still queued */
1042 BUG_ON(!list_empty(&bep->queue));
1044 iudma_reset_channel(udc, iudma);
/* fresh enable: clear any stale stall/wedge state */
1047 bcm63xx_set_stall(udc, bep, false);
1048 clear_bit(bep->ep_num, &udc->wedgemap);
1051 ep->maxpacket = usb_endpoint_maxp(desc);
1053 spin_unlock_irqrestore(&udc->lock, flags);
1058 * bcm63xx_ep_disable - Disable one endpoint.
1059 * @ep: Endpoint to disable.
1061 static int bcm63xx_ep_disable(struct usb_ep *ep)
/* NOTE(review): some lines appear missing (the -EINVAL returns, ep->desc
 * clearing, final return 0) -- verify against upstream bcm63xx_udc.c. */
1063 struct bcm63xx_ep *bep = our_ep(ep);
1064 struct bcm63xx_udc *udc = bep->udc;
1065 struct iudma_ch *iudma = bep->iudma;
1066 struct list_head *pos, *n;
1067 unsigned long flags;
1069 if (!ep || !ep->desc)
1072 spin_lock_irqsave(&udc->lock, flags);
1073 if (!iudma->enabled) {
1074 spin_unlock_irqrestore(&udc->lock, flags);
1077 iudma->enabled = false;
1079 iudma_reset_channel(udc, iudma);
/* fail every still-queued request with -ESHUTDOWN */
1081 if (!list_empty(&bep->queue)) {
1082 list_for_each_safe(pos, n, &bep->queue) {
1083 struct bcm63xx_req *breq =
1084 list_entry(pos, struct bcm63xx_req, queue);
1086 usb_gadget_unmap_request(&udc->gadget, &breq->req,
1088 list_del(&breq->queue);
1089 breq->req.status = -ESHUTDOWN;
/* drop the lock around the completion callback: the gadget
 * driver may re-enter this driver (e.g. queue/dequeue) */
1091 spin_unlock_irqrestore(&udc->lock, flags);
1092 breq->req.complete(&iudma->bep->ep, &breq->req);
1093 spin_lock_irqsave(&udc->lock, flags);
1098 spin_unlock_irqrestore(&udc->lock, flags);
1103 * bcm63xx_udc_alloc_request - Allocate a new request.
1104 * @ep: Endpoint associated with the request.
1105 * @mem_flags: Flags to pass to kzalloc().
1107 static struct usb_request *bcm63xx_udc_alloc_request(struct usb_ep *ep,
1110 struct bcm63xx_req *breq;
1112 breq = kzalloc(sizeof(*breq), mem_flags);
1119 * bcm63xx_udc_free_request - Free a request.
1120 * @ep: Endpoint associated with the request.
1121 * @req: Request to free.
static void bcm63xx_udc_free_request(struct usb_ep *ep,
	struct usb_request *req)
{
	/* breq is the kzalloc'd container from bcm63xx_udc_alloc_request() */
	struct bcm63xx_req *breq = our_req(req);

	kfree(breq);
}
1131 * bcm63xx_udc_queue - Queue up a new request.
1132 * @ep: Endpoint associated with the request.
1133 * @req: Request to add.
1134 * @mem_flags: Unused.
1136 * If the queue is empty, start this request immediately. Otherwise, add
1139 * ep0 replies are sent through this function from the gadget driver, but
1140 * they are treated differently because they need to be handled by the ep0
1141 * state machine. (Sometimes they are replies to control requests that
1142 * were spoofed by this driver, and so they shouldn't be transmitted at all.)
1144 static int bcm63xx_udc_queue(struct usb_ep *ep, struct usb_request *req,
/* NOTE(review): lines appear missing (mem_flags parameter line, the
 * -EINVAL/-EBUSY error returns, req->actual/status initialization, the
 * unmap-on-failure path, final return) -- verify against upstream
 * bcm63xx_udc.c. */
1147 struct bcm63xx_ep *bep = our_ep(ep);
1148 struct bcm63xx_udc *udc = bep->udc;
1149 struct bcm63xx_req *breq = our_req(req);
1150 unsigned long flags;
1153 if (unlikely(!req || !req->complete || !req->buf || !ep))
/* ep0 replies go through the ep0 state machine worker, not IUDMA here */
1160 if (bep == &udc->bep[0]) {
1161 /* only one reply per request, please */
1165 udc->ep0_reply = req;
1166 schedule_work(&udc->ep0_wq);
1170 spin_lock_irqsave(&udc->lock, flags);
1171 if (!bep->iudma->enabled) {
1176 rc = usb_gadget_map_request(&udc->gadget, req, bep->iudma->is_tx);
1178 list_add_tail(&breq->queue, &bep->queue);
/* an empty queue means no transfer in flight: start this one now */
1179 if (list_is_singular(&bep->queue))
1180 iudma_write(udc, bep->iudma, breq);
1184 spin_unlock_irqrestore(&udc->lock, flags);
1189 * bcm63xx_udc_dequeue - Remove a pending request from the queue.
1190 * @ep: Endpoint associated with the request.
1191 * @req: Request to remove.
1193 * If the request is not at the head of the queue, this is easy - just nuke
1194 * it. If the request is at the head of the queue, we'll need to stop the
1195 * DMA transaction and then queue up the successor.
1197 static int bcm63xx_udc_dequeue(struct usb_ep *ep, struct usb_request *req)
1199 struct bcm63xx_ep *bep = our_ep(ep);
1200 struct bcm63xx_udc *udc = bep->udc;
1201 struct bcm63xx_req *breq = our_req(req), *cur;
1202 unsigned long flags;
1205 spin_lock_irqsave(&udc->lock, flags);
1206 if (list_empty(&bep->queue)) {
/* Peek at the head to decide whether DMA is actively using this request. */
1211 cur = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
1212 usb_gadget_unmap_request(&udc->gadget, &breq->req, bep->iudma->is_tx);
/* Head of queue: stop the in-flight DMA before unlinking. */
1215 iudma_reset_channel(udc, bep->iudma);
1216 list_del(&breq->queue);
1218 if (!list_empty(&bep->queue)) {
1219 struct bcm63xx_req *next;
/* Restart the channel with the successor request. */
1221 next = list_first_entry(&bep->queue,
1222 struct bcm63xx_req, queue);
1223 iudma_write(udc, bep->iudma, next);
1226 list_del(&breq->queue);
1230 spin_unlock_irqrestore(&udc->lock, flags);
/* Completion runs outside the lock, as the gadget API requires. */
1232 req->status = -ESHUTDOWN;
1233 req->complete(ep, req);
1239 * bcm63xx_udc_set_halt - Enable/disable STALL flag in the hardware.
1240 * @ep: Endpoint to halt.
1241 * @value: Zero to clear halt; nonzero to set halt.
1243 * See comments in bcm63xx_update_wedge().
1245 static int bcm63xx_udc_set_halt(struct usb_ep *ep, int value)
1247 struct bcm63xx_ep *bep = our_ep(ep);
1248 struct bcm63xx_udc *udc = bep->udc;
1249 unsigned long flags;
1251 spin_lock_irqsave(&udc->lock, flags);
/* Mirror the requested state into both HW and the per-ep flag. */
1252 bcm63xx_set_stall(udc, bep, !!value);
1253 bep->halted = value;
1254 spin_unlock_irqrestore(&udc->lock, flags);
1260 * bcm63xx_udc_set_wedge - Stall the endpoint until the next reset.
1261 * @ep: Endpoint to wedge.
1263 * See comments in bcm63xx_update_wedge().
1265 static int bcm63xx_udc_set_wedge(struct usb_ep *ep)
1267 struct bcm63xx_ep *bep = our_ep(ep);
1268 struct bcm63xx_udc *udc = bep->udc;
1269 unsigned long flags;
1271 spin_lock_irqsave(&udc->lock, flags);
/* Record the wedge in the bitmap so SETUP IRQs can re-assert the stall. */
1272 set_bit(bep->ep_num, &udc->wedgemap);
1273 bcm63xx_set_stall(udc, bep, true);
1274 spin_unlock_irqrestore(&udc->lock, flags);
/* Endpoint operations exposed to the gadget layer. */
1279 static const struct usb_ep_ops bcm63xx_udc_ep_ops = {
1280 .enable = bcm63xx_ep_enable,
1281 .disable = bcm63xx_ep_disable,
1283 .alloc_request = bcm63xx_udc_alloc_request,
1284 .free_request = bcm63xx_udc_free_request,
1286 .queue = bcm63xx_udc_queue,
1287 .dequeue = bcm63xx_udc_dequeue,
1289 .set_halt = bcm63xx_udc_set_halt,
1290 .set_wedge = bcm63xx_udc_set_wedge,
1293 /***********************************************************************
1295 ***********************************************************************/
1298 * bcm63xx_ep0_setup_callback - Drop spinlock to invoke ->setup callback.
1299 * @udc: Reference to the device controller.
1300 * @ctrl: 8-byte SETUP request.
1302 static int bcm63xx_ep0_setup_callback(struct bcm63xx_udc *udc,
1303 struct usb_ctrlrequest *ctrl)
/* The gadget ->setup callback may sleep/re-enter, so release the lock. */
1307 spin_unlock_irq(&udc->lock);
1308 rc = udc->driver->setup(&udc->gadget, ctrl);
1309 spin_lock_irq(&udc->lock);
1314 * bcm63xx_ep0_spoof_set_cfg - Synthesize a SET_CONFIGURATION request.
1315 * @udc: Reference to the device controller.
1317 * Many standard requests are handled automatically in the hardware, but
1318 * we still need to pass them to the gadget driver so that it can
1319 * reconfigure the interfaces/endpoints if necessary.
1321 * Unfortunately we are not able to send a STALL response if the host
1322 * requests an invalid configuration. If this happens, we'll have to be
1323 * content with printing a warning.
1325 static int bcm63xx_ep0_spoof_set_cfg(struct bcm63xx_udc *udc)
1327 struct usb_ctrlrequest ctrl;
/* Build a fake SETUP packet from the cfg value latched by the ctrl ISR. */
1330 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_DEVICE;
1331 ctrl.bRequest = USB_REQ_SET_CONFIGURATION;
1332 ctrl.wValue = cpu_to_le16(udc->cfg);
1336 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
/* HW already ACKed the real request, so a failure can only be logged. */
1338 dev_warn_ratelimited(udc->dev,
1339 "hardware auto-acked bad SET_CONFIGURATION(%d) request\n",
1346 * bcm63xx_ep0_spoof_set_iface - Synthesize a SET_INTERFACE request.
1347 * @udc: Reference to the device controller.
1349 static int bcm63xx_ep0_spoof_set_iface(struct bcm63xx_udc *udc)
1351 struct usb_ctrlrequest ctrl;
/* Rebuild the request from the iface/alt_iface values read in the ISR. */
1354 ctrl.bRequestType = USB_DIR_OUT | USB_RECIP_INTERFACE;
1355 ctrl.bRequest = USB_REQ_SET_INTERFACE;
1356 ctrl.wValue = cpu_to_le16(udc->alt_iface);
1357 ctrl.wIndex = cpu_to_le16(udc->iface);
1360 rc = bcm63xx_ep0_setup_callback(udc, &ctrl);
/* As with SET_CONFIGURATION, we can only warn after the HW auto-ack. */
1362 dev_warn_ratelimited(udc->dev,
1363 "hardware auto-acked bad SET_INTERFACE(%d,%d) request\n",
1364 udc->iface, udc->alt_iface);
1370 * bcm63xx_ep0_map_write - dma_map and iudma_write a single request.
1371 * @udc: Reference to the device controller.
1372 * @ch_idx: IUDMA channel number.
1373 * @req: USB gadget layer representation of the request.
1375 static void bcm63xx_ep0_map_write(struct bcm63xx_udc *udc, int ch_idx,
1376 struct usb_request *req)
1378 struct bcm63xx_req *breq = our_req(req);
1379 struct iudma_ch *iudma = &udc->iudma[ch_idx];
/* Only one ep0 request may ever be in flight at a time. */
1381 BUG_ON(udc->ep0_request);
1382 udc->ep0_request = req;
1386 usb_gadget_map_request(&udc->gadget, req, iudma->is_tx);
1387 iudma_write(udc, iudma, breq);
1391 * bcm63xx_ep0_complete - Set completion status and "stage" the callback.
1392 * @udc: Reference to the device controller.
1393 * @req: USB gadget layer representation of the request.
1394 * @status: Status to return to the gadget driver.
1396 static void bcm63xx_ep0_complete(struct bcm63xx_udc *udc,
1397 struct usb_request *req, int status)
1399 req->status = status;
/* internal requests have no completion; gadget ones are called unlocked */
1402 if (req->complete) {
1403 spin_unlock_irq(&udc->lock);
1404 req->complete(&udc->bep[0].ep, req);
1405 spin_lock_irq(&udc->lock);
1410 * bcm63xx_ep0_nuke_reply - Abort request from the gadget driver due to
1412 * @udc: Reference to the device controller.
1413 * @is_tx: Nonzero for TX (IN), zero for RX (OUT).
1415 static void bcm63xx_ep0_nuke_reply(struct bcm63xx_udc *udc, int is_tx)
1417 struct usb_request *req = udc->ep0_reply;
1419 udc->ep0_reply = NULL;
1420 usb_gadget_unmap_request(&udc->gadget, req, is_tx);
/* If this reply was also the in-flight HW request, clear that state too. */
1421 if (udc->ep0_request == req) {
1422 udc->ep0_req_completed = 0;
1423 udc->ep0_request = NULL;
/* Tell the gadget driver its reply was shot down. */
1425 bcm63xx_ep0_complete(udc, req, -ESHUTDOWN);
1429 * bcm63xx_ep0_read_complete - Close out the pending ep0 request; return
1431 * @udc: Reference to the device controller.
1433 static int bcm63xx_ep0_read_complete(struct bcm63xx_udc *udc)
1435 struct usb_request *req = udc->ep0_request;
/* Clear the in-flight markers so a new ep0 request can be submitted. */
1437 udc->ep0_req_completed = 0;
1438 udc->ep0_request = NULL;
1444 * bcm63xx_ep0_internal_request - Helper function to submit an ep0 request.
1445 * @udc: Reference to the device controller.
1446 * @ch_idx: IUDMA channel number.
1447 * @length: Number of bytes to TX/RX.
1449 * Used for simple transfers performed by the ep0 worker. This will always
1450 * use ep0_ctrl_req / ep0_ctrl_buf.
1452 static void bcm63xx_ep0_internal_request(struct bcm63xx_udc *udc, int ch_idx,
1455 struct usb_request *req = &udc->ep0_ctrl_req.req;
/* No ->complete: bcm63xx_ep0_complete() skips the callback for these. */
1457 req->buf = udc->ep0_ctrl_buf;
1458 req->length = length;
1459 req->complete = NULL;
1461 bcm63xx_ep0_map_write(udc, ch_idx, req);
1465 * bcm63xx_ep0_do_setup - Parse new SETUP packet and decide how to handle it.
1466 * @udc: Reference to the device controller.
1468 * EP0_IDLE probably shouldn't ever happen. EP0_REQUEUE means we're ready
1469 * for the next packet. Anything else means the transaction requires multiple
1470 * stages of handling.
1472 static enum bcm63xx_ep0_state bcm63xx_ep0_do_setup(struct bcm63xx_udc *udc)
1475 struct usb_ctrlrequest *ctrl = (void *)udc->ep0_ctrl_buf;
/* rc is the number of bytes received into ep0_ctrl_buf. */
1477 rc = bcm63xx_ep0_read_complete(udc);
1480 dev_err(udc->dev, "missing SETUP packet\n");
1485 * Handle 0-byte IN STATUS acknowledgement. The hardware doesn't
1486 * ALWAYS deliver these 100% of the time, so if we happen to see one,
1487 * just throw it away.
1492 /* Drop malformed SETUP packets */
1493 if (rc != sizeof(*ctrl)) {
1494 dev_warn_ratelimited(udc->dev,
1495 "malformed SETUP packet (%d bytes)\n", rc);
1499 /* Process new SETUP packet arriving on ep0 */
1500 rc = bcm63xx_ep0_setup_callback(udc, ctrl);
/* Gadget driver rejected the request: stall ep0. */
1502 bcm63xx_set_stall(udc, &udc->bep[0], true);
/* Direction bit picks which data-phase state we enter next. */
1508 else if (ctrl->bRequestType & USB_DIR_IN)
1509 return EP0_IN_DATA_PHASE_SETUP;
1511 return EP0_OUT_DATA_PHASE_SETUP;
1515 * bcm63xx_ep0_do_idle - Check for outstanding requests if ep0 is idle.
1516 * @udc: Reference to the device controller.
1518 * In state EP0_IDLE, the RX descriptor is either pending, or has been
1519 * filled with a SETUP packet from the host. This function handles new
1520 * SETUP packets, control IRQ events (which can generate fake SETUP packets),
1521 * and reset/shutdown events.
1523 * Returns 0 if work was done; -EAGAIN if nothing to do.
1525 static int bcm63xx_ep0_do_idle(struct bcm63xx_udc *udc)
/* Events are consumed one at a time, in priority order. */
1527 if (udc->ep0_req_reset) {
1528 udc->ep0_req_reset = 0;
1529 } else if (udc->ep0_req_set_cfg) {
1530 udc->ep0_req_set_cfg = 0;
1531 if (bcm63xx_ep0_spoof_set_cfg(udc) >= 0)
1532 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1533 } else if (udc->ep0_req_set_iface) {
1534 udc->ep0_req_set_iface = 0;
1535 if (bcm63xx_ep0_spoof_set_iface(udc) >= 0)
1536 udc->ep0state = EP0_IN_FAKE_STATUS_PHASE;
1537 } else if (udc->ep0_req_completed) {
/* A real SETUP packet arrived; hand it to the parser. */
1538 udc->ep0state = bcm63xx_ep0_do_setup(udc);
1539 return udc->ep0state == EP0_IDLE ? -EAGAIN : 0;
1540 } else if (udc->ep0_req_shutdown) {
1541 udc->ep0_req_shutdown = 0;
1542 udc->ep0_req_completed = 0;
1543 udc->ep0_request = NULL;
1544 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1545 usb_gadget_unmap_request(&udc->gadget,
1546 &udc->ep0_ctrl_req.req, 0);
1548 /* bcm63xx_udc_pullup() is waiting for this */
1550 udc->ep0state = EP0_SHUTDOWN;
1551 } else if (udc->ep0_reply) {
1553 * This could happen if a USB RESET shows up during an ep0
1554 * transaction (especially if a laggy driver like gadgetfs
1557 dev_warn(udc->dev, "nuking unexpected reply\n");
1558 bcm63xx_ep0_nuke_reply(udc, 0);
1567 * bcm63xx_ep0_one_round - Handle the current ep0 state.
1568 * @udc: Reference to the device controller.
1570 * Returns 0 if work was done; -EAGAIN if nothing to do.
1572 static int bcm63xx_ep0_one_round(struct bcm63xx_udc *udc)
1574 enum bcm63xx_ep0_state ep0state = udc->ep0state;
/* A pending reset or shutdown aborts whatever phase we are in. */
1575 bool shutdown = udc->ep0_req_reset || udc->ep0_req_shutdown;
1577 switch (udc->ep0state) {
1579 /* set up descriptor to receive SETUP packet */
1580 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_RXCHAN,
1581 BCM63XX_MAX_CTRL_PKT);
1582 ep0state = EP0_IDLE;
1585 return bcm63xx_ep0_do_idle(udc);
1586 case EP0_IN_DATA_PHASE_SETUP:
1588 * Normal case: TX request is in ep0_reply (queued by the
1589 * callback), or will be queued shortly. When it's here,
1590 * send it to the HW and go to EP0_IN_DATA_PHASE_COMPLETE.
1592 * Shutdown case: Stop waiting for the reply. Just
1593 * REQUEUE->IDLE. The gadget driver is NOT expected to
1594 * queue anything else now.
1596 if (udc->ep0_reply) {
1597 bcm63xx_ep0_map_write(udc, IUDMA_EP0_TXCHAN,
1599 ep0state = EP0_IN_DATA_PHASE_COMPLETE;
1600 } else if (shutdown) {
1601 ep0state = EP0_REQUEUE;
1604 case EP0_IN_DATA_PHASE_COMPLETE: {
1606 * Normal case: TX packet (ep0_reply) is in flight; wait for
1607 * it to finish, then go back to REQUEUE->IDLE.
1609 * Shutdown case: Reset the TX channel, send -ESHUTDOWN
1610 * completion to the gadget driver, then REQUEUE->IDLE.
1612 if (udc->ep0_req_completed) {
1613 udc->ep0_reply = NULL;
1614 bcm63xx_ep0_read_complete(udc);
1616 * the "ack" sometimes gets eaten (see
1617 * bcm63xx_ep0_do_idle)
1619 ep0state = EP0_REQUEUE;
1620 } else if (shutdown) {
1621 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1622 bcm63xx_ep0_nuke_reply(udc, 1);
1623 ep0state = EP0_REQUEUE;
1627 case EP0_OUT_DATA_PHASE_SETUP:
1628 /* Similar behavior to EP0_IN_DATA_PHASE_SETUP */
1629 if (udc->ep0_reply) {
1630 bcm63xx_ep0_map_write(udc, IUDMA_EP0_RXCHAN,
1632 ep0state = EP0_OUT_DATA_PHASE_COMPLETE;
1633 } else if (shutdown) {
1634 ep0state = EP0_REQUEUE;
1637 case EP0_OUT_DATA_PHASE_COMPLETE: {
1638 /* Similar behavior to EP0_IN_DATA_PHASE_COMPLETE */
1639 if (udc->ep0_req_completed) {
1640 udc->ep0_reply = NULL;
1641 bcm63xx_ep0_read_complete(udc);
1643 /* send 0-byte ack to host */
1644 bcm63xx_ep0_internal_request(udc, IUDMA_EP0_TXCHAN, 0);
1645 ep0state = EP0_OUT_STATUS_PHASE;
1646 } else if (shutdown) {
1647 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_RXCHAN]);
1648 bcm63xx_ep0_nuke_reply(udc, 0);
1649 ep0state = EP0_REQUEUE;
1653 case EP0_OUT_STATUS_PHASE:
1655 * Normal case: 0-byte OUT ack packet is in flight; wait
1656 * for it to finish, then go back to REQUEUE->IDLE.
1658 * Shutdown case: just cancel the transmission. Don't bother
1659 * calling the completion, because it originated from this
1660 * function anyway. Then go back to REQUEUE->IDLE.
1662 if (udc->ep0_req_completed) {
1663 bcm63xx_ep0_read_complete(udc);
1664 ep0state = EP0_REQUEUE;
1665 } else if (shutdown) {
1666 iudma_reset_channel(udc, &udc->iudma[IUDMA_EP0_TXCHAN]);
1667 udc->ep0_request = NULL;
1668 ep0state = EP0_REQUEUE;
1671 case EP0_IN_FAKE_STATUS_PHASE: {
1673 * Normal case: we spoofed a SETUP packet and are now
1674 * waiting for the gadget driver to send a 0-byte reply.
1675 * This doesn't actually get sent to the HW because the
1676 * HW has already sent its own reply. Once we get the
1677 * response, return to IDLE.
1679 * Shutdown case: return to IDLE immediately.
1681 * Note that the ep0 RX descriptor has remained queued
1682 * (and possibly unfilled) during this entire transaction.
1683 * The HW datapath (IUDMA) never even sees SET_CONFIGURATION
1684 * or SET_INTERFACE transactions.
1686 struct usb_request *r = udc->ep0_reply;
1690 ep0state = EP0_IDLE;
/* Complete the spoofed reply in SW only; nothing goes to the HW. */
1694 bcm63xx_ep0_complete(udc, r, 0);
1695 udc->ep0_reply = NULL;
1696 ep0state = EP0_IDLE;
/* No state change means no progress: tell the caller to stop looping. */
1703 if (udc->ep0state == ep0state)
1706 udc->ep0state = ep0state;
1711 * bcm63xx_ep0_process - ep0 worker thread / state machine.
1712 * @w: Workqueue struct.
1714 * bcm63xx_ep0_process is triggered any time an event occurs on ep0. It
1715 * is used to synchronize ep0 events and ensure that both HW and SW events
1716 * occur in a well-defined order. When the ep0 IUDMA queues are idle, it may
1717 * synthesize SET_CONFIGURATION / SET_INTERFACE requests that were consumed
1718 * by the USBD hardware.
1720 * The worker function will continue iterating around the state machine
1721 * until there is nothing left to do. Usually "nothing left to do" means
1722 * that we're waiting for a new event from the hardware.
1724 static void bcm63xx_ep0_process(struct work_struct *w)
1726 struct bcm63xx_udc *udc = container_of(w, struct bcm63xx_udc, ep0_wq);
1727 spin_lock_irq(&udc->lock);
/* Iterate until one_round() reports -EAGAIN (no progress possible). */
1728 while (bcm63xx_ep0_one_round(udc) == 0)
1730 spin_unlock_irq(&udc->lock);
1733 /***********************************************************************
1734 * Standard UDC gadget operations
1735 ***********************************************************************/
1738 * bcm63xx_udc_get_frame - Read current SOF frame number from the HW.
1739 * @gadget: USB slave device.
1741 static int bcm63xx_udc_get_frame(struct usb_gadget *gadget)
1743 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
/* Frame number is a bitfield inside USBD_STATUS_REG. */
1745 return (usbd_readl(udc, USBD_STATUS_REG) &
1746 USBD_STATUS_SOF_MASK) >> USBD_STATUS_SOF_SHIFT;
1750 * bcm63xx_udc_pullup - Enable/disable pullup on D+ line.
1751 * @gadget: USB slave device.
1752 * @is_on: 0 to disable pullup, 1 to enable.
1754 * See notes in bcm63xx_select_pullup().
1756 static int bcm63xx_udc_pullup(struct usb_gadget *gadget, int is_on)
1758 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1759 unsigned long flags;
1760 int i, rc = -EINVAL;
1762 spin_lock_irqsave(&udc->lock, flags);
/* Enable: only legal from the SHUTDOWN state; re-init everything. */
1763 if (is_on && udc->ep0state == EP0_SHUTDOWN) {
1764 udc->gadget.speed = USB_SPEED_UNKNOWN;
1765 udc->ep0state = EP0_REQUEUE;
1766 bcm63xx_fifo_setup(udc);
1767 bcm63xx_fifo_reset(udc);
1768 bcm63xx_ep_setup(udc);
/* Fresh connection: forget all wedges and clear stalls on every ep. */
1770 bitmap_zero(&udc->wedgemap, BCM63XX_NUM_EP);
1771 for (i = 0; i < BCM63XX_NUM_EP; i++)
1772 bcm63xx_set_stall(udc, &udc->bep[i], false);
1774 bcm63xx_set_ctrl_irqs(udc, true);
1775 bcm63xx_select_pullup(gadget_to_udc(gadget), true);
1777 } else if (!is_on && udc->ep0state != EP0_SHUTDOWN) {
1778 bcm63xx_select_pullup(gadget_to_udc(gadget), false);
/* Ask the ep0 worker to shut down; it sets EP0_SHUTDOWN when done. */
1780 udc->ep0_req_shutdown = 1;
1781 spin_unlock_irqrestore(&udc->lock, flags);
1784 schedule_work(&udc->ep0_wq);
1785 if (udc->ep0state == EP0_SHUTDOWN)
1789 bcm63xx_set_ctrl_irqs(udc, false);
1790 cancel_work_sync(&udc->ep0_wq);
1794 spin_unlock_irqrestore(&udc->lock, flags);
1799 * bcm63xx_udc_start - Start the controller.
1800 * @gadget: USB slave device.
1801 * @driver: Driver for USB slave devices.
1803 static int bcm63xx_udc_start(struct usb_gadget *gadget,
1804 struct usb_gadget_driver *driver)
1806 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1807 unsigned long flags;
/* This controller only supports high-speed-capable gadget drivers. */
1809 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
1817 spin_lock_irqsave(&udc->lock, flags);
/* Power up and program the datapath before exposing the device. */
1819 set_clocks(udc, true);
1820 bcm63xx_fifo_setup(udc);
1821 bcm63xx_ep_init(udc);
1822 bcm63xx_ep_setup(udc);
1823 bcm63xx_fifo_reset(udc);
1824 bcm63xx_select_phy_mode(udc, true);
1826 udc->driver = driver;
1827 driver->driver.bus = NULL;
1828 udc->gadget.dev.of_node = udc->dev->of_node;
1830 spin_unlock_irqrestore(&udc->lock, flags);
1836 * bcm63xx_udc_stop - Shut down the controller.
1837 * @gadget: USB slave device.
1838 * @driver: Driver for USB slave devices.
1840 static int bcm63xx_udc_stop(struct usb_gadget *gadget,
1841 struct usb_gadget_driver *driver)
1843 struct bcm63xx_udc *udc = gadget_to_udc(gadget);
1844 unsigned long flags;
1846 spin_lock_irqsave(&udc->lock, flags);
1851 * If we switch the PHY too abruptly after dropping D+, the host
1852 * will often complain:
1854 * hub 1-0:1.0: port 1 disabled by hub (EMI?), re-enabling...
/* Hand the port back to host mode and gate the clocks. */
1858 bcm63xx_select_phy_mode(udc, false);
1859 set_clocks(udc, false);
1861 spin_unlock_irqrestore(&udc->lock, flags);
/* Gadget-level operations exposed to the UDC core. */
1866 static const struct usb_gadget_ops bcm63xx_udc_ops = {
1867 .get_frame = bcm63xx_udc_get_frame,
1868 .pullup = bcm63xx_udc_pullup,
1869 .udc_start = bcm63xx_udc_start,
1870 .udc_stop = bcm63xx_udc_stop,
1873 /***********************************************************************
1875 ***********************************************************************/
1878 * bcm63xx_update_cfg_iface - Read current configuration/interface settings.
1879 * @udc: Reference to the device controller.
1881 * This controller intercepts SET_CONFIGURATION and SET_INTERFACE messages.
1882 * The driver never sees the raw control packets coming in on the ep0
1883 * IUDMA channel, but at least we get an interrupt event to tell us that
1884 * new values are waiting in the USBD_STATUS register.
1886 static void bcm63xx_update_cfg_iface(struct bcm63xx_udc *udc)
1888 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
/* Latch the HW-selected cfg/iface/alt values for the ep0 worker to spoof. */
1890 udc->cfg = (reg & USBD_STATUS_CFG_MASK) >> USBD_STATUS_CFG_SHIFT;
1891 udc->iface = (reg & USBD_STATUS_INTF_MASK) >> USBD_STATUS_INTF_SHIFT;
1892 udc->alt_iface = (reg & USBD_STATUS_ALTINTF_MASK) >>
1893 USBD_STATUS_ALTINTF_SHIFT;
1894 bcm63xx_ep_setup(udc);
1898 * bcm63xx_update_link_speed - Check to see if the link speed has changed.
1899 * @udc: Reference to the device controller.
1901 * The link speed update coincides with a SETUP IRQ. Returns 1 if the
1902 * speed has changed, so that the caller can update the endpoint settings.
1904 static int bcm63xx_update_link_speed(struct bcm63xx_udc *udc)
1906 u32 reg = usbd_readl(udc, USBD_STATUS_REG);
1907 enum usb_device_speed oldspeed = udc->gadget.speed;
1909 switch ((reg & USBD_STATUS_SPD_MASK) >> USBD_STATUS_SPD_SHIFT) {
1910 case BCM63XX_SPD_HIGH:
1911 udc->gadget.speed = USB_SPEED_HIGH;
1913 case BCM63XX_SPD_FULL:
1914 udc->gadget.speed = USB_SPEED_FULL;
1917 /* this should never happen */
1918 udc->gadget.speed = USB_SPEED_UNKNOWN;
1920 "received SETUP packet with invalid link speed\n");
/* Only report a change; caller re-programs FIFOs/eps when this returns 1. */
1924 if (udc->gadget.speed != oldspeed) {
1925 dev_info(udc->dev, "link up, %s-speed mode\n",
1926 udc->gadget.speed == USB_SPEED_HIGH ? "high" : "full");
1934 * bcm63xx_update_wedge - Iterate through wedged endpoints.
1935 * @udc: Reference to the device controller.
1936 * @new_status: true to "refresh" wedge status; false to clear it.
1938 * On a SETUP interrupt, we need to manually "refresh" the wedge status
1939 * because the controller hardware is designed to automatically clear
1940 * stalls in response to a CLEAR_FEATURE request from the host.
1942 * On a RESET interrupt, we do want to restore all wedged endpoints.
1944 static void bcm63xx_update_wedge(struct bcm63xx_udc *udc, bool new_status)
1948 for_each_set_bit(i, &udc->wedgemap, BCM63XX_NUM_EP) {
1949 bcm63xx_set_stall(udc, &udc->bep[i], new_status);
/* Clearing (reset path) also forgets the wedge entirely. */
1951 clear_bit(i, &udc->wedgemap);
1956 * bcm63xx_udc_ctrl_isr - ISR for control path events (USBD).
1957 * @irq: IRQ number (unused).
1958 * @dev_id: Reference to the device controller.
1960 * This is where we handle link (VBUS) down, USB reset, speed changes,
1961 * SET_CONFIGURATION, and SET_INTERFACE events.
1963 static irqreturn_t bcm63xx_udc_ctrl_isr(int irq, void *dev_id)
1965 struct bcm63xx_udc *udc = dev_id;
1967 bool disconnected = false;
/* Read only the events we are masked-in for, then ack them all at once. */
1969 stat = usbd_readl(udc, USBD_EVENT_IRQ_STATUS_REG) &
1970 usbd_readl(udc, USBD_EVENT_IRQ_MASK_REG);
1972 usbd_writel(udc, stat, USBD_EVENT_IRQ_STATUS_REG);
1974 spin_lock(&udc->lock);
1975 if (stat & BIT(USBD_EVENT_IRQ_USB_LINK)) {
/* Log "link down" only on a real transition away from a known speed. */
1978 if (!(usbd_readl(udc, USBD_EVENTS_REG) &
1979 USBD_EVENTS_USB_LINK_MASK) &&
1980 udc->gadget.speed != USB_SPEED_UNKNOWN)
1981 dev_info(udc->dev, "link down\n");
1983 udc->gadget.speed = USB_SPEED_UNKNOWN;
1984 disconnected = true;
1986 if (stat & BIT(USBD_EVENT_IRQ_USB_RESET)) {
1987 bcm63xx_fifo_setup(udc);
1988 bcm63xx_fifo_reset(udc);
1989 bcm63xx_ep_setup(udc);
/* RESET restores all wedged endpoints (see bcm63xx_update_wedge). */
1991 bcm63xx_update_wedge(udc, false);
1993 udc->ep0_req_reset = 1;
1994 schedule_work(&udc->ep0_wq);
1995 disconnected = true;
1997 if (stat & BIT(USBD_EVENT_IRQ_SETUP)) {
1998 if (bcm63xx_update_link_speed(udc)) {
1999 bcm63xx_fifo_setup(udc);
2000 bcm63xx_ep_setup(udc);
/* SETUP must re-assert wedge stalls the HW may have auto-cleared. */
2002 bcm63xx_update_wedge(udc, true);
2004 if (stat & BIT(USBD_EVENT_IRQ_SETCFG)) {
2005 bcm63xx_update_cfg_iface(udc);
2006 udc->ep0_req_set_cfg = 1;
2007 schedule_work(&udc->ep0_wq);
2009 if (stat & BIT(USBD_EVENT_IRQ_SETINTF)) {
2010 bcm63xx_update_cfg_iface(udc);
2011 udc->ep0_req_set_iface = 1;
2012 schedule_work(&udc->ep0_wq);
2014 spin_unlock(&udc->lock);
/* Disconnect callback runs unlocked, per gadget API convention. */
2016 if (disconnected && udc->driver)
2017 udc->driver->disconnect(&udc->gadget);
2023 * bcm63xx_udc_data_isr - ISR for data path events (IUDMA).
2024 * @irq: IRQ number (unused).
2025 * @dev_id: Reference to the IUDMA channel that generated the interrupt.
2027 * For the two ep0 channels, we have special handling that triggers the
2028 * ep0 worker thread. For normal bulk/intr channels, either queue up
2029 * the next buffer descriptor for the transaction (incomplete transaction),
2030 * or invoke the completion callback (complete transactions).
2032 static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2034 struct iudma_ch *iudma = dev_id;
2035 struct bcm63xx_udc *udc = iudma->udc;
2036 struct bcm63xx_ep *bep;
2037 struct usb_request *req = NULL;
2038 struct bcm63xx_req *breq = NULL;
2040 bool is_done = false;
2042 spin_lock(&udc->lock);
/* Ack the buffer-done interrupt for this channel before reading BDs. */
2044 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2045 ENETDMAC_IR_REG, iudma->ch_idx);
2047 rc = iudma_read(udc, iudma);
2049 /* special handling for EP0 RX (0) and TX (1) */
2050 if (iudma->ch_idx == IUDMA_EP0_RXCHAN ||
2051 iudma->ch_idx == IUDMA_EP0_TXCHAN) {
2052 req = udc->ep0_request;
2053 breq = our_req(req);
2055 /* a single request could require multiple submissions */
/* Done when all bytes moved, or when a short BD ends the transfer. */
2059 if (req->actual >= req->length || breq->bd_bytes > rc) {
2060 udc->ep0_req_completed = 1;
2062 schedule_work(&udc->ep0_wq);
2064 /* "actual" on a ZLP is 1 byte */
2065 req->actual = min(req->actual, req->length);
2067 /* queue up the next BD (same request) */
2068 iudma_write(udc, iudma, breq);
2071 } else if (!list_empty(&bep->queue)) {
2072 breq = list_first_entry(&bep->queue, struct bcm63xx_req, queue);
/* Same completion test as the ep0 path above. */
2078 if (req->actual >= req->length || breq->bd_bytes > rc) {
2080 list_del(&breq->queue);
2082 req->actual = min(req->actual, req->length);
/* Completed request: immediately start the next one, if any. */
2084 if (!list_empty(&bep->queue)) {
2085 struct bcm63xx_req *next;
2087 next = list_first_entry(&bep->queue,
2088 struct bcm63xx_req, queue);
2089 iudma_write(udc, iudma, next);
/* Incomplete: resubmit the same request for its next BD. */
2092 iudma_write(udc, iudma, breq);
2096 spin_unlock(&udc->lock);
/* Unmap and complete outside the lock. */
2099 usb_gadget_unmap_request(&udc->gadget, req, iudma->is_tx);
2101 req->complete(&bep->ep, req);
2107 /***********************************************************************
2109 ***********************************************************************/
2112 * bcm63xx_usbd_dbg_show - Show USBD controller state.
2113 * @s: seq_file to which the information will be written.
2116 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/usbd
2118 static int bcm63xx_usbd_dbg_show(struct seq_file *s, void *p)
2120 struct bcm63xx_udc *udc = s->private;
/* Dump the ep0 state machine, pending-event flags, and raw registers. */
2125 seq_printf(s, "ep0 state: %s\n",
2126 bcm63xx_ep0_state_names[udc->ep0state]);
2127 seq_printf(s, " pending requests: %s%s%s%s%s%s%s\n",
2128 udc->ep0_req_reset ? "reset " : "",
2129 udc->ep0_req_set_cfg ? "set_cfg " : "",
2130 udc->ep0_req_set_iface ? "set_iface " : "",
2131 udc->ep0_req_shutdown ? "shutdown " : "",
2132 udc->ep0_request ? "pending " : "",
2133 udc->ep0_req_completed ? "completed " : "",
2134 udc->ep0_reply ? "reply " : "");
2135 seq_printf(s, "cfg: %d; iface: %d; alt_iface: %d\n",
2136 udc->cfg, udc->iface, udc->alt_iface);
2137 seq_printf(s, "regs:\n");
2138 seq_printf(s, " control: %08x; straps: %08x; status: %08x\n",
2139 usbd_readl(udc, USBD_CONTROL_REG),
2140 usbd_readl(udc, USBD_STRAPS_REG),
2141 usbd_readl(udc, USBD_STATUS_REG));
2142 seq_printf(s, " events: %08x; stall: %08x\n",
2143 usbd_readl(udc, USBD_EVENTS_REG),
2144 usbd_readl(udc, USBD_STALL_REG));
2150 * bcm63xx_iudma_dbg_show - Show IUDMA status and descriptors.
2151 * @s: seq_file to which the information will be written.
2154 * This file nominally shows up as /sys/kernel/debug/bcm63xx_udc/iudma
2156 static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2158 struct bcm63xx_udc *udc = s->private;
/* One section per IUDMA channel: config, SRAM state, BD ring contents. */
2165 for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
2166 struct iudma_ch *iudma = &udc->iudma[ch_idx];
2167 struct list_head *pos;
2169 seq_printf(s, "IUDMA channel %d -- ", ch_idx);
2170 switch (iudma_defaults[ch_idx].ep_type) {
2172 seq_printf(s, "control");
2175 seq_printf(s, "bulk");
2178 seq_printf(s, "interrupt");
/* Odd channel indices are TX, even are RX. */
2181 seq_printf(s, ch_idx & 0x01 ? " tx" : " rx");
2182 seq_printf(s, " [ep%d]:\n",
2183 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2184 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2185 usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2186 usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2187 usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2188 usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2190 sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2191 sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2192 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2193 usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2194 sram2 >> 16, sram2 & 0xffff,
2195 sram3 >> 16, sram3 & 0xffff,
2196 usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2197 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
/* Count queued requests by walking the endpoint list. */
2202 list_for_each(pos, &iudma->bep->queue)
2204 seq_printf(s, "; %d queued\n", i);
2206 seq_printf(s, "\n");
/* Dump every BD, flagging the current read/write positions. */
2209 for (i = 0; i < iudma->n_bds; i++) {
2210 struct bcm_enet_desc *d = &iudma->bd_ring[i];
2212 seq_printf(s, " %03x (%02x): len_stat: %04x_%04x; pa %08x",
2214 d->len_stat >> 16, d->len_stat & 0xffff,
2216 if (d == iudma->read_bd)
2217 seq_printf(s, " <<RD");
2218 if (d == iudma->write_bd)
2219 seq_printf(s, " <<WR");
2220 seq_printf(s, "\n");
2223 seq_printf(s, "\n");
/* debugfs open hook: bind the seq_file show routine to this udc. */
2229 static int bcm63xx_usbd_dbg_open(struct inode *inode, struct file *file)
2231 return single_open(file, bcm63xx_usbd_dbg_show, inode->i_private);
/* debugfs open hook for the iudma file. */
2234 static int bcm63xx_iudma_dbg_open(struct inode *inode, struct file *file)
2236 return single_open(file, bcm63xx_iudma_dbg_show, inode->i_private);
/* File operations for /sys/kernel/debug/bcm63xx_udc/usbd. */
2239 static const struct file_operations usbd_dbg_fops = {
2240 .owner = THIS_MODULE,
2241 .open = bcm63xx_usbd_dbg_open,
2242 .llseek = seq_lseek,
2244 .release = single_release,
/* File operations for /sys/kernel/debug/bcm63xx_udc/iudma. */
2247 static const struct file_operations iudma_dbg_fops = {
2248 .owner = THIS_MODULE,
2249 .open = bcm63xx_iudma_dbg_open,
2250 .llseek = seq_lseek,
2252 .release = single_release,
2257 * bcm63xx_udc_init_debugfs - Create debugfs entries.
2258 * @udc: Reference to the device controller.
2260 static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
2262 struct dentry *root, *usbd, *iudma;
/* Entirely optional; bail silently when debugfs support is compiled out. */
2264 if (!IS_ENABLED(CONFIG_USB_GADGET_DEBUG_FS))
2267 root = debugfs_create_dir(udc->gadget.name, NULL)
2268 if (IS_ERR(root) || !root)
2271 usbd = debugfs_create_file("usbd", 0400, root, udc,
2275 iudma = debugfs_create_file("iudma", 0400, root, udc,
2280 udc->debugfs_root = root;
2281 udc->debugfs_usbd = usbd;
2282 udc->debugfs_iudma = iudma;
/* Error path: tear down anything partially created. */
2285 debugfs_remove(usbd);
2287 debugfs_remove(root);
2289 dev_err(udc->dev, "debugfs is not available\n");
2293 * bcm63xx_udc_cleanup_debugfs - Remove debugfs entries.
2294 * @udc: Reference to the device controller.
2296 * debugfs_remove() is safe to call with a NULL argument.
2298 static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
2300 debugfs_remove(udc->debugfs_iudma);
2301 debugfs_remove(udc->debugfs_usbd);
2302 debugfs_remove(udc->debugfs_root);
/* NULL the dentries so a second cleanup call is harmless. */
2303 udc->debugfs_iudma = NULL;
2304 udc->debugfs_usbd = NULL;
2305 udc->debugfs_root = NULL;
2308 /***********************************************************************
2310 ***********************************************************************/
2313 * bcm63xx_udc_probe - Initialize a new instance of the UDC.
2314 * @pdev: Platform device struct from the bcm63xx BSP code.
2316 * Note that platform data is required, because pd.port_no varies from chip
2317 * to chip and is used to switch the correct USB port to device mode.
2319 static int bcm63xx_udc_probe(struct platform_device *pdev)
2321 struct device *dev = &pdev->dev;
2322 struct bcm63xx_usbd_platform_data *pd = dev->platform_data;
2323 struct bcm63xx_udc *udc;
2324 struct resource *res;
2325 int rc = -ENOMEM, i, irq;
/* devm allocation: freed automatically on probe failure or removal. */
2327 udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
2329 dev_err(dev, "cannot allocate memory\n");
2333 platform_set_drvdata(pdev, udc);
2338 dev_err(dev, "missing platform data\n");
/* MEM resource 0: USBD control registers; resource 1: IUDMA registers. */
2342 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2343 udc->usbd_regs = devm_ioremap_resource(dev, res);
2344 if (IS_ERR(udc->usbd_regs))
2345 return PTR_ERR(udc->usbd_regs);
2347 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2348 udc->iudma_regs = devm_ioremap_resource(dev, res);
2349 if (IS_ERR(udc->iudma_regs))
2350 return PTR_ERR(udc->iudma_regs);
2352 spin_lock_init(&udc->lock);
2353 INIT_WORK(&udc->ep0_wq, bcm63xx_ep0_process);
2355 udc->gadget.ops = &bcm63xx_udc_ops;
2356 udc->gadget.name = dev_name(dev);
/* Full-speed cap can come from platform data or the module parameter. */
2358 if (!pd->use_fullspeed && !use_fullspeed)
2359 udc->gadget.max_speed = USB_SPEED_HIGH;
2361 udc->gadget.max_speed = USB_SPEED_FULL;
2363 /* request clocks, allocate buffers, and clear any pending IRQs */
2364 rc = bcm63xx_init_udc_hw(udc);
2370 /* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
2371 irq = platform_get_irq(pdev, 0);
2373 dev_err(dev, "missing IRQ resource #0\n");
2376 if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
2377 dev_name(dev), udc) < 0) {
2378 dev_err(dev, "error requesting IRQ #%d\n", irq);
2382 /* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
2383 for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
2384 irq = platform_get_irq(pdev, i + 1);
2386 dev_err(dev, "missing IRQ resource #%d\n", i + 1);
2389 if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
2390 dev_name(dev), &udc->iudma[i]) < 0) {
2391 dev_err(dev, "error requesting IRQ #%d\n", irq);
2396 bcm63xx_udc_init_debugfs(udc);
2397 rc = usb_add_gadget_udc(dev, &udc->gadget);
/* Failure path: unwind debugfs and HW init in reverse order. */
2401 bcm63xx_udc_cleanup_debugfs(udc);
2403 bcm63xx_uninit_udc_hw(udc);
2408 * bcm63xx_udc_remove - Remove the device from the system.
2409 * @pdev: Platform device struct from the bcm63xx BSP code.
2411 static int bcm63xx_udc_remove(struct platform_device *pdev)
2413 struct bcm63xx_udc *udc = platform_get_drvdata(pdev);
2415 bcm63xx_udc_cleanup_debugfs(udc);
2416 usb_del_gadget_udc(&udc->gadget);
/* The UDC core must have unbound any gadget driver by now. */
2417 BUG_ON(udc->driver);
2419 bcm63xx_uninit_udc_hw(udc);
/* Platform driver glue; matched by name against the BSP platform device. */
2424 static struct platform_driver bcm63xx_udc_driver = {
2425 .probe = bcm63xx_udc_probe,
2426 .remove = bcm63xx_udc_remove,
2428 .name = DRV_MODULE_NAME,
2429 .owner = THIS_MODULE,
/* Standard module registration and metadata. */
2432 module_platform_driver(bcm63xx_udc_driver);
2434 MODULE_DESCRIPTION("BCM63xx USB Peripheral Controller");
2435 MODULE_AUTHOR("Kevin Cernekee <cernekee@gmail.com>");
2436 MODULE_LICENSE("GPL");
2437 MODULE_ALIAS("platform:" DRV_MODULE_NAME);