usb: dwc_otg_310: support vbus controlled by both gpio and pmic
firefly-linux-kernel-4.4.55.git: drivers/usb/dwc_otg_310/dwc_otg_pcd.c
1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
3  * $Revision: #104 $
4  * $Date: 2012/12/21 $
5  * $Change: 2131568 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34
35 /** @file
36  * This file implements PCD Core. All code in this file is portable and doesn't
37  * use any OS specific functions.
38  * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39  * header file, which can be used to implement OS specific PCD interface.
40  *
41  * An important function of the PCD is managing interrupts generated
42  * by the DWC_otg controller. The implementation of the DWC_otg device
43  * mode interrupt service routines is in dwc_otg_pcd_intr.c.
44  *
45  * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46  * @todo Does it work when the request size is greater than the DEPTSIZ
47  * transfer size?
48  *
49  */
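/*
 * Illustrative sketch (not part of the driver): an OS-specific PCD layer is
 * expected to fill a dwc_otg_pcd_function_ops table and hand it to this core
 * with dwc_otg_pcd_start().  The full set of callbacks is defined in
 * dwc_otg_pcd_if.h; only the members actually referenced from this file
 * (complete, suspend, resume, isoc_complete) are shown, and the my_* handlers
 * are hypothetical placeholders for the gadget/OS glue code.
 *
 *     static const struct dwc_otg_pcd_function_ops my_fops = {
 *             .complete      = my_complete,      // request completion
 *             .suspend       = my_suspend,       // bus suspend notification
 *             .resume        = my_resume,        // resume/wakeup notification
 *             .isoc_complete = my_isoc_complete, // ISO buffer done
 *     };
 *
 *     dwc_otg_pcd_start(pcd, &my_fops);
 */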
50
51 #include "dwc_otg_pcd.h"
52
53 #ifdef DWC_UTE_CFI
54 #include "dwc_otg_cfi.h"
55
56 extern int init_cfi(cfiobject_t *cfiobj);
57 #endif
58
59 /**
60  * Choose endpoint from ep arrays using usb_ep structure.
61  */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t *pcd, void *handle)
63 {
64         int i;
65         if (pcd->ep0.priv == handle) {
66                 return &pcd->ep0;
67         }
68         for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69                 if (pcd->in_ep[i].priv == handle)
70                         return &pcd->in_ep[i];
71                 if (pcd->out_ep[i].priv == handle)
72                         return &pcd->out_ep[i];
73         }
74
75         return NULL;
76 }
77
78 /**
79  * This function completes a request. It calls the request callback.
80  */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
82                           int32_t status)
83 {
84         unsigned stopped = ep->stopped;
85
86         DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87         DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
88
89         /* don't modify queue heads during completion callback */
90         ep->stopped = 1;
91         /* spin_unlock/spin_lock now done in fops->complete() */
92         ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
93                                 req->actual);
94
95         if (ep->pcd->request_pending > 0) {
96                 --ep->pcd->request_pending;
97         }
98
99         ep->stopped = stopped;
100         DWC_FREE(req);
101 }
102
103 /**
104  * This function terminates all the requests in the EP request queue.
105  */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
107 {
108         dwc_otg_pcd_request_t *req;
109
110         ep->stopped = 1;
111
112         /* called with irqs blocked?? */
113         while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115                 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
116         }
117 }
118
119 void dwc_otg_pcd_start(dwc_otg_pcd_t *pcd,
120                        const struct dwc_otg_pcd_function_ops *fops)
121 {
122         pcd->fops = fops;
123 }
124
125 /**
126  * PCD Callback function for initializing the PCD when switching to
127  * device mode.
128  *
129  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
130  */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
132 {
133         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
135
136         /*
137          * Initialize the Core for Device mode.
138          */
139         if (dwc_otg_is_device_mode(core_if)) {
140                 /* dwc_otg_core_dev_init(core_if); */
141                 /* Set core_if's lock pointer to the pcd->lock */
142                 core_if->lock = pcd->lock;
143         }
144         return 1;
145 }
146
147 /** CFI-specific buffer allocation function for EP */
148 #ifdef DWC_UTE_CFI
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
150                               size_t buflen, int flags)
151 {
152         dwc_otg_pcd_ep_t *ep;
153         ep = get_ep_from_handle(pcd, pep);
154         if (!ep) {
155                 DWC_WARN("bad ep\n");
156                 return NULL;
157         }
158
159         return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
160                                           flags);
161 }
162 #else
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
164                               size_t buflen, int flags);
165 #endif
166
167 /**
168  * PCD Callback function for notifying the PCD when resuming from
169  * suspend.
170  *
171  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
172  */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
174 {
175         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
176
177         if (pcd->fops->resume) {
178                 pcd->fops->resume(pcd);
179         }
180
181         /* Stop the SRP timeout timer. */
182         if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183             || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184                 if (GET_CORE_IF(pcd)->srp_timer_started) {
185                         GET_CORE_IF(pcd)->srp_timer_started = 0;
186                         DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
187                 }
188         }
189         return 1;
190 }
191
192 /**
193  * PCD Callback function for notifying the PCD device is suspended.
194  *
195  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
196  */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
198 {
199         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
200
201         if (pcd->fops->suspend) {
202                 DWC_SPINUNLOCK(pcd->lock);
203                 pcd->fops->suspend(pcd);
204                 DWC_SPINLOCK(pcd->lock);
205         }
206
207         return 1;
208 }
209
210 /**
211  * PCD Callback function for stopping the PCD when switching to Host
212  * mode.
213  *
214  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
215  */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
217 {
218         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219         extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
220
221         dwc_otg_pcd_stop(pcd);
222         return 1;
223 }
224
225 /**
226  * PCD Callback structure for handling mode switching.
227  */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229         .start = dwc_otg_pcd_start_cb,
230         .stop = dwc_otg_pcd_stop_cb,
231         .suspend = dwc_otg_pcd_suspend_cb,
232         .resume_wakeup = dwc_otg_pcd_resume_cb,
233         .p = 0,                 /* Set at registration */
234 };
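/*
 * Note: this table is registered with the CIL in dwc_otg_pcd_init() via
 * dwc_otg_cil_register_pcd_callbacks(), which supplies the pcd instance as
 * the .p context pointer ("Set at registration" above).
 */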
235
236 /**
237  * This function allocates a DMA Descriptor chain for the Endpoint
238  * buffer to be used for a transfer to/from the specified endpoint.
239  */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t *dma_desc_addr,
241                                                     uint32_t count)
242 {
243         return DWC_DEV_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
244                                         dma_desc_addr);
245 }
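
/*
 * Expected alloc/free pairing (illustrative only): a chain of 'count'
 * descriptors obtained here is later released with
 * dwc_otg_ep_free_desc_chain() using the same count and the DMA address
 * returned through dma_desc_addr, e.g.
 *
 *     dwc_dma_t dma;
 *     dwc_otg_dev_dma_desc_t *desc = dwc_otg_ep_alloc_desc_chain(&dma, n);
 *     ...
 *     dwc_otg_ep_free_desc_chain(desc, dma, n);
 */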
246
247 /**
248  * This function frees a DMA Descriptor chain allocated by dwc_otg_ep_alloc_desc_chain().
249  */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t *desc_addr,
251                                 uint32_t dma_desc_addr, uint32_t count)
252 {
253         DWC_DEV_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
254                          dma_desc_addr);
255 }
256
257 #ifdef DWC_EN_ISOC
258
259 /**
260  * This function initializes a descriptor chain for Isochronous transfer
261  *
262  * @param core_if Programming view of DWC_otg controller.
263  * @param dwc_ep The EP to start the transfer on.
264  *
265  */
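/*
 * Descriptor-count arithmetic used below, with illustrative numbers: for a
 * buffer processing interval of 8 (micro)frames, bInterval = 1 and
 * 2 packets per frame, an IN EP needs 8 / 1 = 8 descriptors per buffer and
 * an OUT EP needs 8 * 2 / 1 = 16; the chain is then allocated with
 * desc_cnt * 2 entries to cover both halves of the double buffer.
 */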
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if,
267                                         dwc_ep_t *dwc_ep)
268 {
269
270         dsts_data_t dsts = {.d32 = 0 };
271         depctl_data_t depctl = {.d32 = 0 };
272         volatile uint32_t *addr;
273         int i, j;
274         uint32_t len;
275
276         if (dwc_ep->is_in)
277                 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
278         else
279                 dwc_ep->desc_cnt =
280                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
281                     dwc_ep->bInterval;
282
283         /** Allocate descriptors for double buffering */
284         dwc_ep->iso_desc_addr =
285             dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286                                         dwc_ep->desc_cnt * 2);
287         if (!dwc_ep->iso_desc_addr) {
288                 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
289                 return;
290         }
291
292         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
293
294         /** ISO OUT EP */
295         if (dwc_ep->is_in == 0) {
296                 dev_dma_desc_sts_t sts = {.d32 = 0 };
297                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
298                 dma_addr_t dma_ad;
299                 uint32_t data_per_desc;
300                 dwc_otg_dev_out_ep_regs_t *out_regs =
301                     core_if->dev_if->out_ep_regs[dwc_ep->num];
302                 int offset;
303
304                 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305                 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
306
307                 /** Buffer 0 descriptors setup */
308                 dma_ad = dwc_ep->dma_addr0;
309
310                 sts.b_iso_out.bs = BS_HOST_READY;
311                 sts.b_iso_out.rxsts = 0;
312                 sts.b_iso_out.l = 0;
313                 sts.b_iso_out.sp = 0;
314                 sts.b_iso_out.ioc = 0;
315                 sts.b_iso_out.pid = 0;
316                 sts.b_iso_out.framenum = 0;
317
318                 offset = 0;
319                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320                      i += dwc_ep->pkt_per_frm) {
321
322                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323                                 uint32_t len = (j + 1)*dwc_ep->maxpacket;
324                                 if (len > dwc_ep->data_per_frame)
325                                         data_per_desc =
326                                             dwc_ep->data_per_frame -
327                                             j*dwc_ep->maxpacket;
328                                 else
329                                         data_per_desc = dwc_ep->maxpacket;
330                                 len = data_per_desc % 4;
331                                 if (len)
332                                         data_per_desc += 4 - len;
333
334                                 sts.b_iso_out.rxbytes = data_per_desc;
335                                 dma_desc->buf = dma_ad;
336                                 dma_desc->status.d32 = sts.d32;
337
338                                 offset += data_per_desc;
339                                 dma_desc++;
340                                 dma_ad += data_per_desc;
341                         }
342                 }
343
344                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345                         uint32_t len = (j + 1)*dwc_ep->maxpacket;
346                         if (len > dwc_ep->data_per_frame)
347                                 data_per_desc =
348                                     dwc_ep->data_per_frame -
349                                     j*dwc_ep->maxpacket;
350                         else
351                                 data_per_desc = dwc_ep->maxpacket;
352                         len = data_per_desc % 4;
353                         if (len)
354                                 data_per_desc += 4 - len;
355                         sts.b_iso_out.rxbytes = data_per_desc;
356                         dma_desc->buf = dma_ad;
357                         dma_desc->status.d32 = sts.d32;
358
359                         offset += data_per_desc;
360                         dma_desc++;
361                         dma_ad += data_per_desc;
362                 }
363
364                 sts.b_iso_out.ioc = 1;
365                 len = (j + 1)*dwc_ep->maxpacket;
366                 if (len > dwc_ep->data_per_frame)
367                         data_per_desc =
368                             dwc_ep->data_per_frame - j*dwc_ep->maxpacket;
369                 else
370                         data_per_desc = dwc_ep->maxpacket;
371                 len = data_per_desc % 4;
372                 if (len)
373                         data_per_desc += 4 - len;
374                 sts.b_iso_out.rxbytes = data_per_desc;
375
376                 dma_desc->buf = dma_ad;
377                 dma_desc->status.d32 = sts.d32;
378                 dma_desc++;
379
380                 /** Buffer 1 descriptors setup */
381                 sts.b_iso_out.ioc = 0;
382                 dma_ad = dwc_ep->dma_addr1;
383
384                 offset = 0;
385                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386                      i += dwc_ep->pkt_per_frm) {
387                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388                                 uint32_t len = (j + 1)*dwc_ep->maxpacket;
389                                 if (len > dwc_ep->data_per_frame)
390                                         data_per_desc =
391                                             dwc_ep->data_per_frame -
392                                             j*dwc_ep->maxpacket;
393                                 else
394                                         data_per_desc = dwc_ep->maxpacket;
395                                 len = data_per_desc % 4;
396                                 if (len)
397                                         data_per_desc += 4 - len;
398
399                                 sts.b_iso_out.rxbytes = data_per_desc;
401                                 dma_desc->buf = dma_ad;
402                                 dma_desc->status.d32 = sts.d32;
403
404                                 offset += data_per_desc;
405                                 dma_desc++;
406                                 dma_ad += data_per_desc;
407                         }
408                 }
409                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
410                         data_per_desc =
411                             ((j + 1)*dwc_ep->maxpacket >
412                              dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413                             j*dwc_ep->maxpacket : dwc_ep->maxpacket;
414                         data_per_desc +=
415                             (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416                         sts.b_iso_out.rxbytes = data_per_desc;
417                         dma_desc->buf = dma_ad;
418                         dma_desc->status.d32 = sts.d32;
419
420                         offset += data_per_desc;
421                         dma_desc++;
422                         dma_ad += data_per_desc;
423                 }
424
425                 sts.b_iso_out.ioc = 1;
426                 sts.b_iso_out.l = 1;
427                 data_per_desc =
428                     ((j + 1)*dwc_ep->maxpacket >
429                      dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430                     j*dwc_ep->maxpacket : dwc_ep->maxpacket;
431                 data_per_desc +=
432                     (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433                 sts.b_iso_out.rxbytes = data_per_desc;
434
435                 dma_desc->buf = dma_ad;
436                 dma_desc->status.d32 = sts.d32;
437
438                 dwc_ep->next_frame = 0;
439
440                 /** Write the descriptor chain address into the DOEPDMA register */
441                 DWC_WRITE_REG32(&(out_regs->doepdma),
442                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
443
444         }
445         /** ISO IN EP */
446         else {
447                 dev_dma_desc_sts_t sts = {.d32 = 0 };
448                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
449                 dma_addr_t dma_ad;
450                 dwc_otg_dev_in_ep_regs_t *in_regs =
451                     core_if->dev_if->in_ep_regs[dwc_ep->num];
452                 unsigned int frmnumber;
453                 fifosize_data_t txfifosize, rxfifosize;
454
455                 txfifosize.d32 =
456                     DWC_READ_REG32(&core_if->dev_if->
457                                    in_ep_regs[dwc_ep->num]->dtxfsts);
458                 rxfifosize.d32 =
459                     DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
460
461                 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
462
463                 dma_ad = dwc_ep->dma_addr0;
464
465                 dsts.d32 =
466                     DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
467
468                 sts.b_iso_in.bs = BS_HOST_READY;
469                 sts.b_iso_in.txsts = 0;
470                 sts.b_iso_in.sp =
471                     (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472                 sts.b_iso_in.ioc = 0;
473                 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
474
475                 frmnumber = dwc_ep->next_frame;
476
477                 sts.b_iso_in.framenum = frmnumber;
478                 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
479                 sts.b_iso_in.l = 0;
480
481                 /** Buffer 0 descriptors setup */
482                 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483                         dma_desc->buf = dma_ad;
484                         dma_desc->status.d32 = sts.d32;
485                         dma_desc++;
486
487                         dma_ad += dwc_ep->data_per_frame;
488                         sts.b_iso_in.framenum += dwc_ep->bInterval;
489                 }
490
491                 sts.b_iso_in.ioc = 1;
492                 dma_desc->buf = dma_ad;
493                 dma_desc->status.d32 = sts.d32;
494                 ++dma_desc;
495
496                 /** Buffer 1 descriptors setup */
497                 sts.b_iso_in.ioc = 0;
498                 dma_ad = dwc_ep->dma_addr1;
499
500                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501                      i += dwc_ep->pkt_per_frm) {
502                         dma_desc->buf = dma_ad;
503                         dma_desc->status.d32 = sts.d32;
504                         dma_desc++;
505
506                         dma_ad += dwc_ep->data_per_frame;
507                         sts.b_iso_in.framenum += dwc_ep->bInterval;
508
509                         sts.b_iso_in.ioc = 0;
510                 }
511                 sts.b_iso_in.ioc = 1;
512                 sts.b_iso_in.l = 1;
513
514                 dma_desc->buf = dma_ad;
515                 dma_desc->status.d32 = sts.d32;
516
517                 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
518
519                 /** Write the descriptor chain address into the DIEPDMA register */
520                 DWC_WRITE_REG32(&(in_regs->diepdma),
521                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
522         }
523         /** Enable endpoint, clear nak  */
524         depctl.d32 = 0;
525         depctl.b.epena = 1;
526         depctl.b.usbactep = 1;
527         depctl.b.cnak = 1;
528
529         DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530         depctl.d32 = DWC_READ_REG32(addr);
531 }
532
533 /**
534  * This function starts a buffer-mode (non-descriptor DMA) Isochronous transfer
535  *
536  * @param core_if Programming view of DWC_otg controller.
537  * @param ep The EP to start the transfer on.
538  *
539  */
540 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if,
541                                        dwc_ep_t *ep)
542 {
543         depctl_data_t depctl = {.d32 = 0 };
544         volatile uint32_t *addr;
545
546         if (ep->is_in) {
547                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
548         } else {
549                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
550         }
551
552         if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
553                 return;
554         } else {
555                 deptsiz_data_t deptsiz = {.d32 = 0 };
556
557                 ep->xfer_len =
558                     ep->data_per_frame*ep->buf_proc_intrvl / ep->bInterval;
559                 ep->pkt_cnt =
560                     (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
561                 ep->xfer_count = 0;
562                 ep->xfer_buff =
563                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
564                 ep->dma_addr =
565                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
566
567                 if (ep->is_in) {
568                         /* Program the transfer size and packet count
569                          *      as follows: xfersize = N * maxpacket +
570                          *      short_packet pktcnt = N + (short_packet
571                          *      exist ? 1 : 0)
572                          */
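                        /*
                         * Worked example with illustrative values: for
                         * maxpacket = 512 and xfer_len = 1100 bytes,
                         * pktcnt = (1100 - 1 + 512) / 512 = 3 (two full
                         * packets plus one short packet) and xfersize = 1100.
                         */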
573                         deptsiz.b.mc = ep->pkt_per_frm;
574                         deptsiz.b.xfersize = ep->xfer_len;
575                         deptsiz.b.pktcnt =
576                             (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
577                         DWC_WRITE_REG32(&core_if->dev_if->
578                                         in_ep_regs[ep->num]->dieptsiz,
579                                         deptsiz.d32);
580
581                         /* Write the DMA register */
582                         DWC_WRITE_REG32(&
583                                         (core_if->dev_if->
584                                          in_ep_regs[ep->num]->diepdma),
585                                         (uint32_t) ep->dma_addr);
586
587                 } else {
588                         deptsiz.b.pktcnt =
589                             (ep->xfer_len + (ep->maxpacket - 1)) /
590                             ep->maxpacket;
591                         deptsiz.b.xfersize = deptsiz.b.pktcnt*ep->maxpacket;
592
593                         DWC_WRITE_REG32(&core_if->dev_if->
594                                         out_ep_regs[ep->num]->doeptsiz,
595                                         deptsiz.d32);
596
597                         /* Write the DMA register */
598                         DWC_WRITE_REG32(&
599                                         (core_if->dev_if->
600                                          out_ep_regs[ep->num]->doepdma),
601                                         (uint32_t) ep->dma_addr);
602
603                 }
604                 /** Enable endpoint, clear nak  */
605                 depctl.d32 = 0;
606                 depctl.b.epena = 1;
607                 depctl.b.cnak = 1;
608
609                 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
610         }
611 }
612
613 /**
614  * This function does the setup for a data transfer for an EP and
615  * starts the transfer. For an IN transfer, the packets will be
616  * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
617  * the packets are unloaded from the Rx FIFO in the ISR.
618  *
619  * @param core_if Programming view of DWC_otg controller.
620  * @param ep The EP to start the transfer on.
621  */
622
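/*
 * Transfer-mode dispatch (summary of the body below): descriptor DMA when
 * dma_desc_enable is set, buffer DMA when DMA is enabled together with
 * pti_enh_enable, and frame-by-frame transfers via
 * dwc_otg_iso_ep_start_frm_transfer() in all other cases.
 */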
623 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if,
624                                           dwc_ep_t *ep)
625 {
626         if (core_if->dma_enable) {
627                 if (core_if->dma_desc_enable) {
628                         if (ep->is_in) {
629                                 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
630                         } else {
631                                 ep->desc_cnt = ep->pkt_cnt;
632                         }
633                         dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
634                 } else {
635                         if (core_if->pti_enh_enable) {
636                                 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
637                         } else {
638                                 ep->cur_pkt_addr =
639                                     (ep->proc_buf_num) ? ep->
640                                     xfer_buff1 : ep->xfer_buff0;
641                                 ep->cur_pkt_dma_addr =
642                                     (ep->proc_buf_num) ? ep->
643                                     dma_addr1 : ep->dma_addr0;
644                                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
645                         }
646                 }
647         } else {
648                 ep->cur_pkt_addr =
649                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
650                 ep->cur_pkt_dma_addr =
651                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
652                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
653         }
654 }
655
656 /**
657  * This function stops the transfer on an EP and
658  * resets the EP's state variables.
659  *
660  * @param core_if Programming view of DWC_otg controller.
661  * @param ep The EP to stop the transfer on.
662  */
663
664 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
665 {
666         depctl_data_t depctl = {.d32 = 0 };
667         volatile uint32_t *addr;
668
669         if (ep->is_in == 1) {
670                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
671         } else {
672                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
673         }
674
675         /* disable the ep */
676         depctl.d32 = DWC_READ_REG32(addr);
677
678         depctl.b.epdis = 1;
679         depctl.b.snak = 1;
680
681         DWC_WRITE_REG32(addr, depctl.d32);
682
683         if (core_if->dma_desc_enable &&
684             ep->iso_desc_addr && ep->iso_dma_desc_addr) {
685                 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
686                                            ep->iso_dma_desc_addr,
687                                            ep->desc_cnt * 2);
688         }
689
690         /* reset variables */
691         ep->dma_addr0 = 0;
692         ep->dma_addr1 = 0;
693         ep->xfer_buff0 = 0;
694         ep->xfer_buff1 = 0;
695         ep->data_per_frame = 0;
696         ep->data_pattern_frame = 0;
697         ep->sync_frame = 0;
698         ep->buf_proc_intrvl = 0;
699         ep->bInterval = 0;
700         ep->proc_buf_num = 0;
701         ep->pkt_per_frm = 0;
703         ep->desc_cnt = 0;
704         ep->iso_desc_addr = 0;
705         ep->iso_dma_desc_addr = 0;
706 }
707
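/**
 * Starts ISO streaming on an EP.  The parameter meanings below are inferred
 * from the code that follows, not from separate documentation: buf0/buf1 and
 * dma0/dma1 are the two halves of the double buffer, data_per_frame is the
 * payload per (micro)frame, start_frame selects the first frame (-1 means
 * "next frame", aligned to bInterval), buf_proc_intrvl is the buffer
 * processing interval, sync_frame/dp_frame are stored for the not yet
 * implemented pattern-data support, req_handle identifies the request towards
 * the gadget layer and atomic_alloc selects DWC_ALLOC_ATOMIC for the
 * packet-info array.
 */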
708 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t *pcd, void *ep_handle,
709                              uint8_t *buf0, uint8_t *buf1, dwc_dma_t dma0,
710                              dwc_dma_t dma1, int sync_frame, int dp_frame,
711                              int data_per_frame, int start_frame,
712                              int buf_proc_intrvl, void *req_handle,
713                              int atomic_alloc)
714 {
715         dwc_otg_pcd_ep_t *ep;
716         dwc_irqflags_t flags = 0;
717         dwc_ep_t *dwc_ep;
718         int32_t frm_data;
719         dsts_data_t dsts;
720         dwc_otg_core_if_t *core_if;
721
722         ep = get_ep_from_handle(pcd, ep_handle);
723
724         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
725                 DWC_WARN("bad ep\n");
726                 return -DWC_E_INVALID;
727         }
728
729         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
730         core_if = GET_CORE_IF(pcd);
731         dwc_ep = &ep->dwc_ep;
732
733         if (ep->iso_req_handle) {
734                 DWC_WARN("ISO request in progress\n");
735         }
736
737         dwc_ep->dma_addr0 = dma0;
738         dwc_ep->dma_addr1 = dma1;
739
740         dwc_ep->xfer_buff0 = buf0;
741         dwc_ep->xfer_buff1 = buf1;
742
743         dwc_ep->data_per_frame = data_per_frame;
744
745         /** @todo - pattern data support is to be implemented in the future */
746         dwc_ep->data_pattern_frame = dp_frame;
747         dwc_ep->sync_frame = sync_frame;
748
749         dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
750
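        /*
         * USB 2.0 encodes the isochronous service interval as
         * 2^(bInterval - 1) (micro)frames, hence the shift below.
         */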
751         dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
752
753         dwc_ep->proc_buf_num = 0;
754
755         dwc_ep->pkt_per_frm = 0;
756         frm_data = ep->dwc_ep.data_per_frame;
757         while (frm_data > 0) {
758                 dwc_ep->pkt_per_frm++;
759                 frm_data -= ep->dwc_ep.maxpacket;
760         }
761
762         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
763
764         if (start_frame == -1) {
765                 dwc_ep->next_frame = dsts.b.soffn + 1;
766                 if (dwc_ep->bInterval != 1) {
767                         dwc_ep->next_frame =
768                             dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
769                                                   dwc_ep->next_frame %
770                                                   dwc_ep->bInterval);
771                 }
772         } else {
773                 dwc_ep->next_frame = start_frame;
774         }
775
776         if (!core_if->pti_enh_enable) {
777                 dwc_ep->pkt_cnt =
778                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
779                     dwc_ep->bInterval;
780         } else {
781                 dwc_ep->pkt_cnt =
782                     (dwc_ep->data_per_frame *
783                      (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
784                      - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
785         }
786
787         if (core_if->dma_desc_enable) {
788                 dwc_ep->desc_cnt =
789                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
790                     dwc_ep->bInterval;
791         }
792
793         if (atomic_alloc) {
794                 dwc_ep->pkt_info =
795                     DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
796         } else {
797                 dwc_ep->pkt_info =
798                     DWC_ALLOC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
799         }
800         if (!dwc_ep->pkt_info) {
801                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
802                 return -DWC_E_NO_MEMORY;
803         }
804         if (core_if->pti_enh_enable) {
805                 dwc_memset(dwc_ep->pkt_info, 0,
806                            sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
807         }
808
809         dwc_ep->cur_pkt = 0;
810         ep->iso_req_handle = req_handle;
811
812         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
813         dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
814         return 0;
815 }
816
817 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t *pcd, void *ep_handle,
818                             void *req_handle)
819 {
820         dwc_irqflags_t flags = 0;
821         dwc_otg_pcd_ep_t *ep;
822         dwc_ep_t *dwc_ep;
823
824         ep = get_ep_from_handle(pcd, ep_handle);
825         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
826                 DWC_WARN("bad ep\n");
827                 return -DWC_E_INVALID;
828         }
829         dwc_ep = &ep->dwc_ep;
830
831         dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
832
833         DWC_FREE(dwc_ep->pkt_info);
834         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
835         if (ep->iso_req_handle != req_handle) {
836                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
837                 return -DWC_E_INVALID;
838         }
839
840         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
841
842         ep->iso_req_handle = 0;
843         return 0;
844 }
845
846 /**
847  * This function is used for periodic data exchange between the PCD and
848  * gadget drivers for Isochronous EPs.
849  *
850  *      - Every time a sync period completes this function is called to
851  *        perform data exchange between PCD and gadget
852  */
853 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep,
854                              void *req_handle)
855 {
856         int i;
857         dwc_ep_t *dwc_ep;
858
859         dwc_ep = &ep->dwc_ep;
860
861         DWC_SPINUNLOCK(ep->pcd->lock);
862         pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
863                                  dwc_ep->proc_buf_num ^ 0x1);
864         DWC_SPINLOCK(ep->pcd->lock);
865
866         for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
867                 dwc_ep->pkt_info[i].status = 0;
868                 dwc_ep->pkt_info[i].offset = 0;
869                 dwc_ep->pkt_info[i].length = 0;
870         }
871 }
872
873 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t *pcd, void *ep_handle,
874                                      void *iso_req_handle)
875 {
876         dwc_otg_pcd_ep_t *ep;
877         dwc_ep_t *dwc_ep;
878
879         ep = get_ep_from_handle(pcd, ep_handle);
880         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
881                 DWC_WARN("bad ep\n");
882                 return -DWC_E_INVALID;
883         }
884         dwc_ep = &ep->dwc_ep;
885
886         return dwc_ep->pkt_cnt;
887 }
888
889 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t *pcd, void *ep_handle,
890                                        void *iso_req_handle, int packet,
891                                        int *status, int *actual, int *offset)
892 {
893         dwc_otg_pcd_ep_t *ep;
894         dwc_ep_t *dwc_ep;
895
896         ep = get_ep_from_handle(pcd, ep_handle);
897         if (!ep)
898         if (!ep) {
899                 DWC_WARN("bad ep\n");
900                 return;
        }

901
902         *status = dwc_ep->pkt_info[packet].status;
903         *actual = dwc_ep->pkt_info[packet].length;
904         *offset = dwc_ep->pkt_info[packet].offset;
905 }
906
907 #endif /* DWC_EN_ISOC */
908
909 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *pcd_ep,
910                                 uint32_t is_in, uint32_t ep_num)
911 {
912         /* Init EP structure */
913         pcd_ep->desc = 0;
914         pcd_ep->pcd = pcd;
915         pcd_ep->stopped = 1;
916         pcd_ep->queue_sof = 0;
917
918         /* Init DWC ep structure */
919         pcd_ep->dwc_ep.is_in = is_in;
920         pcd_ep->dwc_ep.num = ep_num;
921         pcd_ep->dwc_ep.active = 0;
922         pcd_ep->dwc_ep.tx_fifo_num = 0;
923         /* Control until ep is activated */
924         pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
925         pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
926         pcd_ep->dwc_ep.dma_addr = 0;
927         pcd_ep->dwc_ep.start_xfer_buff = 0;
928         pcd_ep->dwc_ep.xfer_buff = 0;
929         pcd_ep->dwc_ep.xfer_len = 0;
930         pcd_ep->dwc_ep.xfer_count = 0;
931         pcd_ep->dwc_ep.sent_zlp = 0;
932         pcd_ep->dwc_ep.total_len = 0;
933         pcd_ep->dwc_ep.desc_addr = 0;
934         pcd_ep->dwc_ep.dma_desc_addr = 0;
935         DWC_CIRCLEQ_INIT(&pcd_ep->queue);
936 }
937
938 /**
939  * Initialize the EPs
940  */
941 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
942 {
943         int i;
944         uint32_t hwcfg1;
945         dwc_otg_pcd_ep_t *ep;
946         int in_ep_cntr, out_ep_cntr;
947         uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
948         uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
949         /**
950          * Initialize the EP0 structure.
951          */
952         ep = &pcd->ep0;
953         dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
954
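        /*
         * HWCFG1 holds two direction bits per endpoint (assuming the usual
         * encoding: 0 = bidirectional, 1 = IN only, 2 = OUT only).  The loops
         * below shift the register so that bit 0 is the bit that must be
         * clear for the endpoint to exist in the direction being set up.
         */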
955         in_ep_cntr = 0;
956         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
957         for (i = 1; in_ep_cntr < num_in_eps; i++) {
958                 if ((hwcfg1 & 0x1) == 0) {
959                         dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
960                         in_ep_cntr++;
961                         /**
962                          * @todo NGS: Add direction to EP, based on contents
963                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
965                          */
966                         dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
967
968                         DWC_CIRCLEQ_INIT(&ep->queue);
969                 }
970                 hwcfg1 >>= 2;
971         }
972
973         out_ep_cntr = 0;
974         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
975         for (i = 1; out_ep_cntr < num_out_eps; i++) {
976                 if ((hwcfg1 & 0x1) == 0) {
977                         dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
978                         out_ep_cntr++;
979                         /**
980                          * @todo NGS: Add direction to EP, based on contents
981                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
983                          */
984                         dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
985                         DWC_CIRCLEQ_INIT(&ep->queue);
986                 }
987                 hwcfg1 >>= 2;
988         }
989
990         pcd->ep0state = EP0_DISCONNECT;
991         pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
992         pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
993 }
994
995 /**
996  * This function is called when the SRP timer expires. The SRP should
997  * complete within 6 seconds.
998  */
999 static void srp_timeout(void *ptr)
1000 {
1001         gotgctl_data_t gotgctl;
1002         dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1003         volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1004
1005         gotgctl.d32 = DWC_READ_REG32(addr);
1006
1007         core_if->srp_timer_started = 0;
1008
1009         if (core_if->adp_enable) {
1010                 if (gotgctl.b.bsesvld == 0) {
1011                         gpwrdn_data_t gpwrdn = {.d32 = 0 };
1012                         DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1013                         /* Power off the core */
1014                         if (core_if->power_down == 2) {
1015                                 gpwrdn.b.pwrdnswtch = 1;
1016                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
1017                                                  gpwrdn, gpwrdn.d32, 0);
1018                         }
1019
1020                         gpwrdn.d32 = 0;
1021                         gpwrdn.b.pmuintsel = 1;
1022                         gpwrdn.b.pmuactv = 1;
1023                         DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1024                                          gpwrdn.d32);
1025                         dwc_otg_adp_probe_start(core_if);
1026                 } else {
1027                         DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1028                         core_if->op_state = B_PERIPHERAL;
1029                         dwc_otg_core_init(core_if);
1030                         dwc_otg_enable_global_interrupts(core_if);
1031                         cil_pcd_start(core_if);
1032                 }
1033         }
1034
1035         if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1036             (core_if->core_params->i2c_enable)) {
1037                 DWC_PRINTF("SRP Timeout\n");
1038
1039                 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1040                         if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1041                                 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
1042                                                                p);
1043                         }
1044
1045                         /* Clear Session Request */
1046                         gotgctl.d32 = 0;
1047                         gotgctl.b.sesreq = 1;
1048                         DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1049                                          gotgctl.d32, 0);
1050
1051                         core_if->srp_success = 0;
1052                 } else {
1053                         __DWC_ERROR("Device not connected/responding\n");
1054                         gotgctl.b.sesreq = 0;
1055                         DWC_WRITE_REG32(addr, gotgctl.d32);
1056                 }
1057         } else if (gotgctl.b.sesreq) {
1058                 DWC_PRINTF("SRP Timeout\n");
1059
1060                 __DWC_ERROR("Device not connected/responding\n");
1061                 gotgctl.b.sesreq = 0;
1062                 DWC_WRITE_REG32(addr, gotgctl.d32);
1063         } else {
1064                 DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1065         }
1066 }
1067
1068 /**
1069  * Tasklet
1070  *
1071  */
1072 extern void start_next_request(dwc_otg_pcd_ep_t *ep);
1073
1074 static void start_xfer_tasklet_func(void *data)
1075 {
1076         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1077         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1078
1079         int i;
1080         depctl_data_t diepctl;
1081
1082         DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1083
1084         diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1085
1086         if (pcd->ep0.queue_sof) {
1087                 pcd->ep0.queue_sof = 0;
1088                 start_next_request(&pcd->ep0);
1089                 /* break; */
1090         }
1091
1092         for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1093                 depctl_data_t diepctl;
1094                 diepctl.d32 =
1095                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1096
1097                 if (pcd->in_ep[i].queue_sof) {
1098                         pcd->in_ep[i].queue_sof = 0;
1099                         start_next_request(&pcd->in_ep[i]);
1100                         /* break; */
1101                 }
1102         }
1103
1104         return;
1105 }
1106
1107 /**
1108  * This function initializes the PCD portion of the driver.
1109  *
1110  */
1111 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t *core_if)
1112 {
1113         dwc_otg_pcd_t *pcd = NULL;
1114         dwc_otg_dev_if_t *dev_if;
1115         int i;
1116
1117         /*
1118          * Allocate PCD structure
1119          */
1120         pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1121
1122         if (pcd == NULL) {
1123                 return NULL;
1124         }
1125
1126         pcd->lock = DWC_SPINLOCK_ALLOC();
1127         if (!pcd->lock) {
1128                 DWC_ERROR("Could not allocate lock for pcd");
1129                 DWC_FREE(pcd);
1130                 return NULL;
1131         }
1132         /* Set core_if's lock pointer to pcd->lock */
1133         core_if->lock = pcd->lock;
1134         pcd->core_if = core_if;
1135
1136         dev_if = core_if->dev_if;
1137         dev_if->isoc_ep = NULL;
1138
1139         if (core_if->hwcfg4.b.ded_fifo_en) {
1140                 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1141         } else {
1142                 DWC_PRINTF("Shared Tx FIFO mode\n");
1143         }
1144
1145         /*
1146          * Initialize the Core for Device mode here if there is no ADP support.
1147          * Otherwise it will be done later in the dwc_otg_adp_start routine.
1148          */
1149         /* if (dwc_otg_is_device_mode(core_if) ) { */
1150         /*      dwc_otg_core_dev_init(core_if); */
1151         /*} */
1152
1153         /*
1154          * Register the PCD Callbacks.
1155          */
1156         dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1157
1158         /*
1159          * Initialize the DMA buffer for SETUP packets
1160          */
1161         if (GET_CORE_IF(pcd)->dma_enable) {
1162                 pcd->setup_pkt =
1163                     DWC_DEV_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
1164                                              &pcd->setup_pkt_dma_handle);
1165                 if (pcd->setup_pkt == NULL) {
1166                         DWC_FREE(pcd);
1167                         return NULL;
1168                 }
1169
1170                 pcd->status_buf =
1171                     DWC_DEV_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
1172                                              &pcd->status_buf_dma_handle);
1173                 if (pcd->status_buf == NULL) {
1174                         DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1175                                          pcd->setup_pkt,
1176                                          pcd->setup_pkt_dma_handle);
1177                         DWC_FREE(pcd);
1178                         return NULL;
1179                 }
1180
1181                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1182                         dev_if->setup_desc_addr[0] =
1183                             dwc_otg_ep_alloc_desc_chain
1184                             (&dev_if->dma_setup_desc_addr[0], 1);
1185                         dev_if->setup_desc_addr[1] =
1186                             dwc_otg_ep_alloc_desc_chain
1187                             (&dev_if->dma_setup_desc_addr[1], 1);
1188                         dev_if->in_desc_addr =
1189                             dwc_otg_ep_alloc_desc_chain
1190                             (&dev_if->dma_in_desc_addr, 1);
1191                         dev_if->out_desc_addr =
1192                             dwc_otg_ep_alloc_desc_chain
1193                             (&dev_if->dma_out_desc_addr, 1);
1194                         pcd->data_terminated = 0;
1195
1196                         if (dev_if->setup_desc_addr[0] == 0
1197                             || dev_if->setup_desc_addr[1] == 0
1198                             || dev_if->in_desc_addr == 0
1199                             || dev_if->out_desc_addr == 0) {
1200
1201                                 if (dev_if->out_desc_addr)
1202                                         dwc_otg_ep_free_desc_chain
1203                                             (dev_if->out_desc_addr,
1204                                              dev_if->dma_out_desc_addr, 1);
1205                                 if (dev_if->in_desc_addr)
1206                                         dwc_otg_ep_free_desc_chain
1207                                             (dev_if->in_desc_addr,
1208                                              dev_if->dma_in_desc_addr, 1);
1209                                 if (dev_if->setup_desc_addr[1])
1210                                         dwc_otg_ep_free_desc_chain
1211                                             (dev_if->setup_desc_addr[1],
1212                                              dev_if->dma_setup_desc_addr[1], 1);
1213                                 if (dev_if->setup_desc_addr[0])
1214                                         dwc_otg_ep_free_desc_chain
1215                                             (dev_if->setup_desc_addr[0],
1216                                              dev_if->dma_setup_desc_addr[0], 1);
1217
1218                                 DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1219                                                  pcd->setup_pkt,
1220                                                  pcd->setup_pkt_dma_handle);
1221                                 DWC_DEV_DMA_FREE(sizeof(*pcd->status_buf),
1222                                                  pcd->status_buf,
1223                                                  pcd->status_buf_dma_handle);
1224
1225                                 DWC_FREE(pcd);
1226
1227                                 return NULL;
1228                         }
1229                 }
1230         } else {
1231                 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1232                 if (pcd->setup_pkt == NULL) {
1233                         DWC_FREE(pcd);
1234                         return NULL;
1235                 }
1236
1237                 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1238                 if (pcd->status_buf == NULL) {
1239                         DWC_FREE(pcd->setup_pkt);
1240                         DWC_FREE(pcd);
1241                         return NULL;
1242                 }
1243         }
1244
1245         dwc_otg_pcd_reinit(pcd);
1246
1247         /* Allocate the cfi object for the PCD */
1248 #ifdef DWC_UTE_CFI
1249         pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1250         if (NULL == pcd->cfi)
1251                 goto fail;
1252         if (init_cfi(pcd->cfi)) {
1253                 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1254                 goto fail;
1255         }
1256 #endif
1257
1258         /* Initialize tasklets */
1259         pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1260                                                  start_xfer_tasklet_func, pcd);
1261         pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1262                                                 do_test_mode, pcd);
1263
1264         /* Initialize SRP timer */
1265         core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1266
1267         if (core_if->core_params->dev_out_nak) {
1268                 /**
1269                 * Initialize xfer timeout timer. Implemented for
1270                 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1271                 */
1272                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1273                         pcd->core_if->ep_xfer_timer[i] =
1274                             DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1275                                             &pcd->core_if->ep_xfer_info[i]);
1276                 }
1277         }
1278
1279         return pcd;
1280 #ifdef DWC_UTE_CFI
1281 fail:
1282 #endif
1283         if (pcd->setup_pkt)
1284                 DWC_FREE(pcd->setup_pkt);
1285         if (pcd->status_buf)
1286                 DWC_FREE(pcd->status_buf);
1287 #ifdef DWC_UTE_CFI
1288         if (pcd->cfi)
1289                 DWC_FREE(pcd->cfi);
1290 #endif
1291         if (pcd)
1292                 DWC_FREE(pcd);
1293         return NULL;
1294
1295 }
1296
1297 /**
1298  * Remove PCD specific data
1299  */
1300 void dwc_otg_pcd_remove(dwc_otg_pcd_t *pcd)
1301 {
1302         dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1303         int i;
1304         if (pcd->core_if->core_params->dev_out_nak) {
1305                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1306                         DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1307                         pcd->core_if->ep_xfer_info[i].state = 0;
1308                 }
1309         }
1310
1311         if (GET_CORE_IF(pcd)->dma_enable) {
1312                 DWC_DEV_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1313                                  pcd->setup_pkt_dma_handle);
1314                 DWC_DEV_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
1315                                  pcd->status_buf_dma_handle);
1316                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1317                         dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1318                                                    dev_if->dma_setup_desc_addr
1319                                                    [0], 1);
1320                         dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1321                                                    dev_if->dma_setup_desc_addr
1322                                                    [1], 1);
1323                         dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1324                                                    dev_if->dma_in_desc_addr, 1);
1325                         dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1326                                                    dev_if->dma_out_desc_addr,
1327                                                    1);
1328                 }
1329         } else {
1330                 DWC_FREE(pcd->setup_pkt);
1331                 DWC_FREE(pcd->status_buf);
1332         }
1333         DWC_SPINLOCK_FREE(pcd->lock);
1334         /* Set core_if's lock pointer to NULL */
1335         pcd->core_if->lock = NULL;
1336
1337         DWC_TASK_FREE(pcd->start_xfer_tasklet);
1338         DWC_TASK_FREE(pcd->test_mode_tasklet);
1339         if (pcd->core_if->core_params->dev_out_nak) {
1340                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1341                         if (pcd->core_if->ep_xfer_timer[i]) {
1342                                 DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1343                         }
1344                 }
1345         }
1346
1347 /* Release the CFI object's dynamic memory */
1348 #ifdef DWC_UTE_CFI
1349         if (pcd->cfi->ops.release) {
1350                 pcd->cfi->ops.release(pcd->cfi);
1351         }
1352 #endif
1353
1354         DWC_FREE(pcd);
1355 }
1356
1357 /**
1358  * Returns whether registered pcd is dual speed or not
1359  */
1360 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t *pcd)
1361 {
1362         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1363
1364         if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1365             ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1366              (core_if->hwcfg2.b.fs_phy_type == 1) &&
1367              (core_if->core_params->ulpi_fs_ls))) {
1368                 return 0;
1369         }
1370
1371         return 1;
1372 }
1373
1374 /**
1375  * Returns whether registered pcd is OTG capable or not
1376  */
1377 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t *pcd)
1378 {
1379         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1380         gusbcfg_data_t usbcfg = {.d32 = 0 };
1381         uint32_t retval = 0;
1382
1383         usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1384 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
1385         if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
1386                 return 0;
1387         else
1388                 return 1;
1389 # else
1390         if (!usbcfg.b.srpcap)
1391                 return 0;
1392         else
1393                 retval |= 1;
1394
1395         if (usbcfg.b.hnpcap)
1396                 retval |= 2;
1397
1398         if (core_if->adp_enable)
1399                 retval |= 4;
1400 #endif
1401
1402         return retval;
1403 }
1404
1405 /**
1406  * This function assigns a Tx FIFO to an EP
1407  * in dedicated (multiple) Tx FIFO mode
1408  */
1409 static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
1410 {
1411         uint32_t TxMsk = 1;
1412         int i;
1413
1414         for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1415                 if ((TxMsk & core_if->tx_msk) == 0) {
1416                         core_if->tx_msk |= TxMsk;
1417                         return i + 1;
1418                 }
1419                 TxMsk <<= 1;
1420         }
1421         return 0;
1422 }
1423
1424 /**
1425  * This function assigns a periodic Tx FIFO to a periodic EP
1426  * in shared Tx FIFO mode
1427  */
1428 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
1429 {
1430         uint32_t PerTxMsk = 1;
1431         int i;
1432         for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1433                 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1434                         core_if->p_tx_msk |= PerTxMsk;
1435                         return i + 1;
1436                 }
1437                 PerTxMsk <<= 1;
1438         }
1439         return 0;
1440 }
1441
1442 /**
1443  * This function releases a periodic Tx FIFO
1444  * in shared Tx FIFO mode
1445  */
1446 static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if,
1447                                   uint32_t fifo_num)
1448 {
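        /* (msk & bit) ^ msk == msk & ~bit: clear this FIFO's bit in the
         * allocation mask so the FIFO can be handed out again. */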
1449         core_if->p_tx_msk =
1450             (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1451 }
1452
1453 /**
1454  * This function releases a Tx FIFO
1455  * in dedicated (multiple) Tx FIFO mode
1456  */
1457 static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
1458 {
1459         core_if->tx_msk =
1460             (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1461 }
1462
1463 /**
1464  * This function is called from the gadget layer
1465  * to enable a PCD endpoint.
1466  */
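/*
 * Illustrative sketch only (not part of this driver): a Linux gadget wrapper
 * would typically pass the raw endpoint descriptor and its own usb_ep pointer
 * as the opaque handle, e.g.:
 *
 *	static int my_gadget_ep_enable(struct usb_ep *usb_ep,
 *				       const struct usb_endpoint_descriptor *desc)
 *	{
 *		return dwc_otg_pcd_ep_enable(gadget_wrapper->pcd,
 *					     (const uint8_t *)desc, usb_ep);
 *	}
 *
 * The names my_gadget_ep_enable and gadget_wrapper above are hypothetical.
 */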
1467 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t *pcd,
1468                           const uint8_t *ep_desc, void *usb_ep)
1469 {
1470         int num, dir;
1471         dwc_otg_pcd_ep_t *ep = NULL;
1472         const usb_endpoint_descriptor_t *desc;
1473         dwc_irqflags_t flags;
1474         /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1475         /* gdfifocfg_data_t gdfifocfg = {.d32 = 0 }; */
1476         /* gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 }; */
1477         int retval = 0;
1478         int i, epcount;
1479
1480         desc = (const usb_endpoint_descriptor_t *)ep_desc;
1481
1482         if (!desc) {
1483                 pcd->ep0.priv = usb_ep;
1484                 ep = &pcd->ep0;
1485                 retval = -DWC_E_INVALID;
1486                 goto out;
1487         }
1488
1489         num = UE_GET_ADDR(desc->bEndpointAddress);
1490         dir = UE_GET_DIR(desc->bEndpointAddress);
1491
1492         if (!desc->wMaxPacketSize) {
1493                 DWC_WARN("bad maxpacketsize\n");
1494                 retval = -DWC_E_INVALID;
1495                 goto out;
1496         }
1497
1498         if (dir == UE_DIR_IN) {
1499                 epcount = pcd->core_if->dev_if->num_in_eps;
1500                 for (i = 0; i < epcount; i++) {
1501                         if (num == pcd->in_ep[i].dwc_ep.num) {
1502                                 ep = &pcd->in_ep[i];
1503                                 break;
1504                         }
1505                 }
1506         } else {
1507                 epcount = pcd->core_if->dev_if->num_out_eps;
1508                 for (i = 0; i < epcount; i++) {
1509                         if (num == pcd->out_ep[i].dwc_ep.num) {
1510                                 ep = &pcd->out_ep[i];
1511                                 break;
1512                         }
1513                 }
1514         }
1515
1516         if (!ep) {
1517                 DWC_WARN("bad address\n");
1518                 retval = -DWC_E_INVALID;
1519                 goto out;
1520         }
1521
1522         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1523
1524         ep->desc = desc;
1525         ep->priv = usb_ep;
1526
1527         /*
1528          * Activate the EP
1529          */
1530         ep->stopped = 0;
1531
1532         ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1533         ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1534
1535         ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
1536
1537         if (ep->dwc_ep.is_in) {
1538                 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1539                         ep->dwc_ep.tx_fifo_num = 0;
1540
1541                         if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1542                                 /*
1543                                  * if ISOC EP then assign a Periodic Tx FIFO.
1544                                  */
1545                                 ep->dwc_ep.tx_fifo_num =
1546                                     assign_perio_tx_fifo(GET_CORE_IF(pcd));
1547                         }
1548                 } else {
1549                         /*
1550                          * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1551                          */
1552                         ep->dwc_ep.tx_fifo_num =
1553                             assign_tx_fifo(GET_CORE_IF(pcd));
1554                 }
1555
1556                 /* Calculating EP info controller base address */
1557 #if 0
1558                 if (ep->dwc_ep.tx_fifo_num
1559                     && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1560                         gdfifocfg.d32 =
1561                             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
1562                                            gdfifocfg);
1563                         gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1564                         dptxfsiz.d32 =
1565                             (DWC_READ_REG32
1566                              (&GET_CORE_IF(pcd)->
1567                               core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num -
1568                                                         1]) >> 16);
1569                         gdfifocfg.b.epinfobase =
1570                             gdfifocfgbase.d32 + dptxfsiz.d32;
1571                         if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1572                                 DWC_WRITE_REG32(&GET_CORE_IF
1573                                                 (pcd)->core_global_regs->
1574                                                 gdfifocfg, gdfifocfg.d32);
1575                         }
1576                 }
1577 #endif
1578         }
1579         /* Set initial data PID. */
1580         if (ep->dwc_ep.type == UE_BULK) {
1581                 ep->dwc_ep.data_pid_start = 0;
1582         }
1583
1584         /* Alloc DMA Descriptors */
1585         if (GET_CORE_IF(pcd)->dma_desc_enable) {
1586 #ifndef DWC_UTE_PER_IO
1587                 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1588 #endif
1589                         ep->dwc_ep.desc_addr =
1590                             dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
1591                                                         dma_desc_addr,
1592                                                         MAX_DMA_DESC_CNT);
1593                         if (!ep->dwc_ep.desc_addr) {
1594                                 DWC_WARN("%s, can't allocate DMA descriptor\n",
1595                                          __func__);
1596                                 retval = -DWC_E_SHUTDOWN;
1597                                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1598                                 goto out;
1599                         }
1600 #ifndef DWC_UTE_PER_IO
1601                 }
1602 #endif
1603         }
1604
1605         DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1606                     (ep->dwc_ep.is_in ? "IN" : "OUT"),
1607                     ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
1608 #ifdef DWC_UTE_PER_IO
1609         ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1610 #endif
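        /* bInterval in the descriptor is an exponent: the polling period is
         * 2^(bInterval - 1) (micro)frames. */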
1611         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1612                 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
1613                 ep->dwc_ep.frame_num = 0xFFFFFFFF;
1614         }
1615
1616         dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1617
1618 #ifdef DWC_UTE_CFI
1619         if (pcd->cfi->ops.ep_enable) {
1620                 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1621         }
1622 #endif
1623
1624         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1625
1626 out:
1627         return retval;
1628 }
1629
1630 /**
1631  * This function is called from the gadget layer
1632  * to disable a PCD endpoint.
1633  */
1634 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t *pcd, void *ep_handle)
1635 {
1636         dwc_otg_pcd_ep_t *ep;
1637         dwc_irqflags_t flags;
1638         dwc_otg_dev_dma_desc_t *desc_addr;
1639         dwc_dma_t dma_desc_addr;
1640         gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1641         gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1642         /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1643
1644         ep = get_ep_from_handle(pcd, ep_handle);
1645
1646         if (!ep || !ep->desc) {
1647                 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1648                 return -DWC_E_INVALID;
1649         }
1650
1651         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1652
1653         dwc_otg_request_nuke(ep);
1654
1655         dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1656         if (pcd->core_if->core_params->dev_out_nak) {
1657                 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1658                 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1659         }
1660         ep->desc = NULL;
1661         ep->stopped = 1;
1662
1663         gdfifocfg.d32 =
1664             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1665         gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1666
1667         if (ep->dwc_ep.is_in) {
1668                 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1669                         /* Flush the Tx FIFO */
1670                         dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1671                                               ep->dwc_ep.tx_fifo_num);
1672                 }
1673                 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1674                 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1675 #if 0
1676                 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1677                         /* Decreasing EPinfo Base Addr */
1678                         dptxfsiz.d32 =
1679                             (DWC_READ_REG32
1680                              (&GET_CORE_IF(pcd)->core_global_regs->
1681                               dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1682                         gdfifocfg.b.epinfobase =
1683                             gdfifocfgbase.d32 - dptxfsiz.d32;
1684                         if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1685                                 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1686                                                 core_global_regs->gdfifocfg,
1687                                                 gdfifocfg.d32);
1688                         }
1689                 }
1690 #endif
1691         }
1692
1693         /* Free DMA Descriptors */
1694         if (GET_CORE_IF(pcd)->dma_desc_enable) {
1695                 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1696                         desc_addr = ep->dwc_ep.desc_addr;
1697                         dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1698
1699                         /* Cannot call dma_free_coherent() with IRQs disabled */
1700                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1701                         dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1702                                                    MAX_DMA_DESC_CNT);
1703
1704                         goto out_unlocked;
1705                 }
1706         }
1707         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1708
1709 out_unlocked:
1710         DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1711                     ep->dwc_ep.is_in ? "IN" : "OUT");
1712         return 0;
1713
1714 }
1715
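/**
 * Poll (in 1 us steps) until one of the bits in 'bit' is set in *reg or
 * 'timeout' microseconds have elapsed. Returns 0 on success, -ETIMEDOUT
 * otherwise.
 */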
1716 static int dwc_otg_wait_bit_set(volatile uint32_t *reg,
1717                                 uint32_t bit, uint32_t timeout)
1718 {
1719         int i;
1720
1721         for (i = 0; i < timeout; i++) {
1722                 if (DWC_READ_REG32(reg) & bit)
1723                         return 0;
1724
1725                 dwc_udelay(1);
1726         }
1727
1728         return -ETIMEDOUT;
1729 }
1730
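/**
 * Stop an active transfer on the given endpoint, following the core's
 * endpoint-disable sequence: for IN EPs set SNAK, wait for the NAK effect,
 * then disable the EP; for OUT EPs assert the global OUT NAK first, disable
 * the EP, then clear the global OUT NAK again.
 */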
1731 static void dwc_otg_pcd_ep_stop_transfer(dwc_otg_core_if_t
1732                                          *core_if, dwc_ep_t *ep)
1733 {
1734         depctl_data_t depctl = {.d32 = 0 };
1735
1736         /* Read DEPCTLn register */
1737         if (ep->is_in == 1)
1738                 depctl.d32 = DWC_READ_REG32(&core_if->dev_if->
1739                                              in_ep_regs[ep->num]->
1740                                              diepctl);
1741         else
1742                 depctl.d32 = DWC_READ_REG32(&core_if->dev_if->
1743                                             out_ep_regs[ep->num]->
1744                                             doepctl);
1745
1746         if (ep->is_in == 1) {
1747                 diepint_data_t diepint = {.d32 = 0 };
1748
1749                 depctl.b.snak = 1;
1750                 DWC_WRITE_REG32(&core_if->dev_if->
1751                                 in_ep_regs[ep->num]->diepctl,
1752                                 depctl.d32);
1753
1754                 diepint.b.inepnakeff = 1;
1755                 /* Wait for Nak effect */
1756                 if (dwc_otg_wait_bit_set(&core_if->dev_if->
1757                                          in_ep_regs[ep->num]
1758                                          ->diepint,
1759                                          diepint.d32,
1760                                          100)) {
1761                         DWC_WARN("%s: timeout diepctl.snak\n",
1762                                  __func__);
1763                 } else {
1764                         DWC_WRITE_REG32(&core_if->dev_if->
1765                                         in_ep_regs[ep->num]->
1766                                         diepint, diepint.d32);
1767                 }
1768
1769                 depctl.d32 = 0;
1770                 depctl.b.epdis = 1;
1771                 DWC_WRITE_REG32(&core_if->dev_if->
1772                                 in_ep_regs[ep->num]->diepctl,
1773                                 depctl.d32);
1774
1775                 diepint.d32 = 0;
1776                 diepint.b.epdisabled = 1;
1777                 if (dwc_otg_wait_bit_set(&core_if->dev_if->
1778                                          in_ep_regs[ep->num]
1779                                          ->diepint,
1780                                          diepint.d32,
1781                                          100)) {
1782                         DWC_WARN("%s: timeout diepctl.epdis\n",
1783                                  __func__);
1784                 } else {
1785                         DWC_WRITE_REG32(&core_if->dev_if->
1786                                         in_ep_regs[ep->num]->
1787                                         diepint, diepint.d32);
1788                 }
1789         } else {
1790                 dctl_data_t dctl = {.d32 = 0 };
1791                 gintmsk_data_t gintsts = {.d32 = 0 };
1792                 doepint_data_t doepint = {.d32 = 0 };
1793
1794                 dctl.b.sgoutnak = 1;
1795                 DWC_MODIFY_REG32(&core_if->dev_if->
1796                                  dev_global_regs->dctl, 0, dctl.d32);
1797
1798                 /* Wait for global nak to take effect */
1799                 gintsts.d32 = 0;
1800                 gintsts.b.goutnakeff = 1;
1801                 if (dwc_otg_wait_bit_set(&core_if->core_global_regs->
1802                                          gintsts, gintsts.d32,
1803                                          100)) {
1804                         DWC_WARN("%s: timeout dctl.sgoutnak\n",
1805                                  __func__);
1806                 } else {
1807                         DWC_WRITE_REG32(&core_if->core_global_regs
1808                                         ->gintsts, gintsts.d32);
1809                 }
1810
1811                 depctl.d32 = 0;
1812                 depctl.b.epdis = 1;
1813                 depctl.b.snak = 1;
1814                 DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
1815                                 doepctl, depctl.d32);
1816
1817                 doepint.b.epdisabled = 1;
1818                 if (dwc_otg_wait_bit_set(&core_if->dev_if
1819                                          ->out_ep_regs[ep->num]
1820                                          ->doepint, doepint.d32,
1821                                          100)) {
1822                         DWC_WARN("%s: timeout doepctl.epdis\n",
1823                                  __func__);
1824                 } else {
1825                         DWC_WRITE_REG32(&core_if->dev_if->
1826                                         out_ep_regs[ep->num]->
1827                                         doepint, doepint.d32);
1828                 }
1829
1830                 dctl.d32 = 0;
1831                 dctl.b.cgoutnak = 1;
1832                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
1833                                  dctl, 0, dctl.d32);
1834         }
1835 }
1836
1837 /******************************************************************************/
1838 #ifdef DWC_UTE_PER_IO
1839
1840 /**
1841  * Free the request and its extended parts
1842  *
1843  */
1844 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req)
1845 {
1846         DWC_FREE(req->ext_req.per_io_frame_descs);
1847         DWC_FREE(req);
1848 }
1849
1850 /**
1851  * Start the next request in the endpoint's queue.
1852  *
1853  */
1854 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t *pcd,
1855                                         dwc_otg_pcd_ep_t *ep)
1856 {
1857         int i;
1858         dwc_otg_pcd_request_t *req = NULL;
1859         dwc_ep_t *dwcep = NULL;
1860         struct dwc_iso_xreq_port *ereq = NULL;
1861         struct dwc_iso_pkt_desc_port *ddesc_iso;
1862         uint16_t nat;
1863         depctl_data_t diepctl;
1864
1865         dwcep = &ep->dwc_ep;
1866
1867         if (dwcep->xiso_active_xfers > 0) {
1868 #if 0
1869                 /* Disable this to decrease s/w overhead
1870                  * that is crucial for Isoc transfers */
1871                 DWC_WARN("There are currently active transfers for EP%d \
1872                         (active=%d; queued=%d)", dwcep->num,
1873                         dwcep->xiso_active_xfers, dwcep->xiso_queued_xfers);
1874 #endif
1875                 return 0;
1876         }
1877
1878         nat = UGETW(ep->desc->wMaxPacketSize);
1879         nat = (nat >> 11) & 0x03;
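        /* Bits 12:11 of wMaxPacketSize encode the number of additional
         * transactions per microframe for high-bandwidth ISOC endpoints. */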
1880
1881         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1882                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1883                 ereq = &req->ext_req;
1884                 ep->stopped = 0;
1885
1886                 /* Get the frame number */
1887                 dwcep->xiso_frame_num =
1888                     dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1889                 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1890
1891                 ddesc_iso = ereq->per_io_frame_descs;
1892
1893                 if (dwcep->is_in) {
1894                         /* Setup DMA Descriptor chain for IN Isoc request */
1895                         for (i = 0; i < ereq->pio_pkt_count; i++) {
1896                                 /* if ((i % (nat + 1)) == 0) */
1897                                 if (i > 0)
1898                                         dwcep->xiso_frame_num =
1899                                             (dwcep->xiso_bInterval +
1900                                              dwcep->xiso_frame_num) & 0x3FFF;
1901                                 dwcep->desc_addr[i].buf =
1902                                     req->dma + ddesc_iso[i].offset;
1903                                 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1904                                     ddesc_iso[i].length;
1905                                 dwcep->desc_addr[i].status.b_iso_in.framenum =
1906                                     dwcep->xiso_frame_num;
1907                                 dwcep->desc_addr[i].status.b_iso_in.bs =
1908                                     BS_HOST_READY;
1909                                 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
1910                                 dwcep->desc_addr[i].status.b_iso_in.sp =
1911                                     (ddesc_iso[i].length %
1912                                      dwcep->maxpacket) ? 1 : 0;
1913                                 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1914                                 dwcep->desc_addr[i].status.b_iso_in.pid =
1915                                     nat + 1;
1916                                 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1917
1918                                 /* Process the last descriptor */
1919                                 if (i == ereq->pio_pkt_count - 1) {
1920                                         dwcep->desc_addr[i].status.b_iso_in.
1921                                             ioc = 1;
1922                                         dwcep->desc_addr[i].status.b_iso_in.l =
1923                                             1;
1924                                 }
1925                         }
1926
1927                         /* Setup and start the transfer for this endpoint */
1928                         dwcep->xiso_active_xfers++;
1929                         DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1930                                         dev_if->in_ep_regs[dwcep->num]->diepdma,
1931                                         dwcep->dma_desc_addr);
1932                         diepctl.d32 = 0;
1933                         diepctl.b.epena = 1;
1934                         diepctl.b.cnak = 1;
1935                         DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1936                                          dev_if->in_ep_regs[dwcep->num]->
1937                                          diepctl, 0, diepctl.d32);
1938                 } else {
1939                         /* Setup DMA Descriptor chain for OUT Isoc request */
1940                         for (i = 0; i < ereq->pio_pkt_count; i++) {
1941                                 /* if ((i % (nat + 1)) == 0) */
1942                                 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1943                                                          dwcep->
1944                                                          xiso_frame_num) &
1945                                     0x3FFF;
1946                                 dwcep->desc_addr[i].buf =
1947                                     req->dma + ddesc_iso[i].offset;
1948                                 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1949                                     ddesc_iso[i].length;
1950                                 dwcep->desc_addr[i].status.b_iso_out.framenum =
1951                                     dwcep->xiso_frame_num;
1952                                 dwcep->desc_addr[i].status.b_iso_out.bs =
1953                                     BS_HOST_READY;
1954                                 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1955                                 dwcep->desc_addr[i].status.b_iso_out.sp =
1956                                     (ddesc_iso[i].length %
1957                                      dwcep->maxpacket) ? 1 : 0;
1958                                 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1959                                 dwcep->desc_addr[i].status.b_iso_out.pid =
1960                                     nat + 1;
1961                                 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1962
1963                                 /* Process the last descriptor */
1964                                 if (i == ereq->pio_pkt_count - 1) {
1965                                         dwcep->desc_addr[i].status.b_iso_out.
1966                                             ioc = 1;
1967                                         dwcep->desc_addr[i].status.b_iso_out.l =
1968                                             1;
1969                                 }
1970                         }
1971
1972                         /* Setup and start the transfer for this endpoint */
1973                         dwcep->xiso_active_xfers++;
1974                         DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1975                                         out_ep_regs[dwcep->num]->doepdma,
1976                                         dwcep->dma_desc_addr);
1977                         diepctl.d32 = 0;
1978                         diepctl.b.epena = 1;
1979                         diepctl.b.cnak = 1;
1980                         DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1981                                          out_ep_regs[dwcep->num]->doepctl, 0,
1982                                          diepctl.d32);
1983                 }
1984
1985         } else {
1986                 ep->stopped = 1;
1987         }
1988
1989         return 0;
1990 }
1991
1992 /**
1993  *      - Remove the request from the queue, complete it, and start the next one
1994  */
1995 void complete_xiso_ep(dwc_otg_pcd_ep_t *ep)
1996 {
1997         dwc_otg_pcd_request_t *req = NULL;
1998         struct dwc_iso_xreq_port *ereq = NULL;
1999         struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
2000         dwc_ep_t *dwcep = NULL;
2001         int i;
2002
2003         /* DWC_DEBUG(); */
2004         dwcep = &ep->dwc_ep;
2005
2006         /* Get the first pending request from the queue */
2007         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2008                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
2009                 if (!req) {
2010                         DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
2011                         return;
2012                 }
2013                 dwcep->xiso_active_xfers--;
2014                 dwcep->xiso_queued_xfers--;
2015                 /* Remove this request from the queue */
2016                 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
2017         } else {
2018                 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
2019                 return;
2020         }
2021
2022         ep->stopped = 1;
2023         ereq = &req->ext_req;
2024         ddesc_iso = ereq->per_io_frame_descs;
2025
2026         if (dwcep->xiso_active_xfers < 0) {
2027                 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
2028                          dwcep->xiso_active_xfers);
2029         }
2030
2031         /* Fill the Isoc descs of portable extended req from dma descriptors */
2032         for (i = 0; i < ereq->pio_pkt_count; i++) {
2033                 if (dwcep->is_in) {     /* IN endpoints */
2034                         ddesc_iso[i].actual_length = ddesc_iso[i].length -
2035                             dwcep->desc_addr[i].status.b_iso_in.txbytes;
2036                         ddesc_iso[i].status =
2037                             dwcep->desc_addr[i].status.b_iso_in.txsts;
2038                 } else {        /* OUT endpoints */
2039                         ddesc_iso[i].actual_length = ddesc_iso[i].length -
2040                             dwcep->desc_addr[i].status.b_iso_out.rxbytes;
2041                         ddesc_iso[i].status =
2042                             dwcep->desc_addr[i].status.b_iso_out.rxsts;
2043                 }
2044         }
2045
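        /* Drop the PCD lock across the completion callback so the
         * non-portable layer can safely re-enter the PCD (e.g. to queue the
         * next request). */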
2046         DWC_SPINUNLOCK(ep->pcd->lock);
2047
2048         /* Call the completion function in the non-portable logic */
2049         ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
2050                                       &req->ext_req);
2051
2052         DWC_SPINLOCK(ep->pcd->lock);
2053
2054         /* Free the request - specific freeing needed for extended request object */
2055         dwc_pcd_xiso_ereq_free(ep, req);
2056
2057         /* Start the next request */
2058         dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
2059
2060         return;
2061 }
2062
2063 /**
2064  * Create and initialize the Isoc pkt descriptors of the extended request.
2065  *
2066  */
2067 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t *req,
2068                                              void *ereq_nonport,
2069                                              int atomic_alloc)
2070 {
2071         struct dwc_iso_xreq_port *ereq = NULL;
2072         struct dwc_iso_xreq_port *req_mapped = NULL;
2073         struct dwc_iso_pkt_desc_port *ipds = NULL;      /* To be created in this function */
2074         uint32_t pkt_count;
2075         int i;
2076
2077         ereq = &req->ext_req;
2078         req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
2079         pkt_count = req_mapped->pio_pkt_count;
2080
2081         /* Create the isoc descs */
2082         if (atomic_alloc) {
2083                 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
2084         } else {
2085                 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
2086         }
2087
2088         if (!ipds) {
2089                 DWC_ERROR("Failed to allocate isoc descriptors");
2090                 return -DWC_E_NO_MEMORY;
2091         }
2092
2093         /* Initialize the extended request fields */
2094         ereq->per_io_frame_descs = ipds;
2095         ereq->error_count = 0;
2096         ereq->pio_alloc_pkt_count = pkt_count;
2097         ereq->pio_pkt_count = pkt_count;
2098         ereq->tr_sub_flags = req_mapped->tr_sub_flags;
2099
2100         /* Init the Isoc descriptors */
2101         for (i = 0; i < pkt_count; i++) {
2102                 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
2103                 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
2104                 ipds[i].status = req_mapped->per_io_frame_descs[i].status;      /* 0 */
2105                 ipds[i].actual_length =
2106                     req_mapped->per_io_frame_descs[i].actual_length;
2107         }
2108
2109         return 0;
2110 }
2111
2112 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
2113 {
2114         struct dwc_iso_pkt_desc_port *xfd = NULL;
2115         int i;
2116
2117         DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
2118         DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
2119         DWC_DEBUG("error_count=%d", ereq->error_count);
2120         DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
2121         DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
2122         DWC_DEBUG("res=%d", ereq->res);
2123
2124         for (i = 0; i < ereq->pio_pkt_count; i++) {
2125                 xfd = &ereq->per_io_frame_descs[i];
2126                 DWC_DEBUG("FD #%d", i);
2127
2128                 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
2129                 DWC_DEBUG("xfd->length=%d", xfd->length);
2130                 DWC_DEBUG("xfd->offset=%d", xfd->offset);
2131                 DWC_DEBUG("xfd->status=%d", xfd->status);
2132         }
2133 }
2134
2135 /**
2136  * Queue an extended (per-IO) Isochronous request on the given endpoint.
2137  */
2138 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2139                               uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2140                               int zero, void *req_handle, int atomic_alloc,
2141                               void *ereq_nonport)
2142 {
2143         dwc_otg_pcd_request_t *req = NULL;
2144         dwc_otg_pcd_ep_t *ep;
2145         dwc_irqflags_t flags;
2146         int res;
2147
2148         ep = get_ep_from_handle(pcd, ep_handle);
2149         if (!ep) {
2150                 DWC_WARN("bad ep\n");
2151                 return -DWC_E_INVALID;
2152         }
2153
2154         /* We support this extension only for DDMA mode */
2155         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2156                 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2157                         return -DWC_E_INVALID;
2158
2159         /* Create a dwc_otg_pcd_request_t object */
2160         if (atomic_alloc) {
2161                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2162         } else {
2163                 req = DWC_ALLOC(sizeof(*req));
2164         }
2165
2166         if (!req) {
2167                 return -DWC_E_NO_MEMORY;
2168         }
2169
2170         /* Create the Isoc descs for this request; they must exactly match the
2171          * structure sent to us from the non-portable logic */
2172         res =
2173             dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2174         if (res) {
2175                 DWC_WARN("Failed to init the Isoc descriptors");
2176                 DWC_FREE(req);
2177                 return res;
2178         }
2179
2180         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2181
2182         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2183         req->buf = buf;
2184         req->dma = dma_buf;
2185         req->length = buflen;
2186         req->sent_zlp = zero;
2187         req->priv = req_handle;
2188
2189         /* DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags); */
2190         ep->dwc_ep.dma_addr = dma_buf;
2191         ep->dwc_ep.start_xfer_buff = buf;
2192         ep->dwc_ep.xfer_buff = buf;
2193         ep->dwc_ep.xfer_len = 0;
2194         ep->dwc_ep.xfer_count = 0;
2195         ep->dwc_ep.sent_zlp = 0;
2196         ep->dwc_ep.total_len = buflen;
2197
2198         /* Add this request to the tail */
2199         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2200         ep->dwc_ep.xiso_queued_xfers++;
2201
2202         /* DWC_DEBUG("CP_0"); */
2203         /* DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags); */
2204         /* prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport); */
2205         /* prn_ext_request(&req->ext_req); */
2206
2207         /* DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags); */
2208
2209         /* If the request's tr_sub_flags is DWC_EREQ_TF_ASAP, check whether there
2210          * is an active transfer for this endpoint. If there are no active
2211          * transfers, get the first entry from the queue and start that transfer.
2212          */
2213         if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2214                 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2215                 if (res) {
2216                         DWC_WARN("Failed to start the next Isoc transfer");
2217                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2218                         DWC_FREE(req);
2219                         return res;
2220                 }
2221         }
2222
2223         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2224         return 0;
2225 }
2226
2227 #endif
2228 /* END ifdef DWC_UTE_PER_IO ***************************************************/
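/**
 * Queue a request on the given endpoint. An illustrative sketch of how the
 * non-portable (Linux gadget) wrapper layer would typically call this; the
 * names gadget_wrapper, usb_ep, usb_req and gfp_flags below are hypothetical,
 * not defined in this file:
 *
 *	retval = dwc_otg_pcd_ep_queue(gadget_wrapper->pcd, usb_ep,
 *				      usb_req->buf, usb_req->dma,
 *				      usb_req->length, usb_req->zero,
 *				      usb_req, gfp_flags == GFP_ATOMIC ? 1 : 0);
 */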
2229 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2230                          uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2231                          int zero, void *req_handle, int atomic_alloc)
2232 {
2233         dwc_irqflags_t flags;
2234         dwc_otg_pcd_request_t *req;
2235         dwc_otg_pcd_ep_t *ep;
2236         uint32_t max_transfer;
2237
2238         ep = get_ep_from_handle(pcd, ep_handle);
2239         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2240                 DWC_WARN("bad ep\n");
2241                 return -DWC_E_INVALID;
2242         }
2243
2244         if (atomic_alloc) {
2245                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2246         } else {
2247                 req = DWC_ALLOC(sizeof(*req));
2248         }
2249
2250         if (!req) {
2251                 return -DWC_E_NO_MEMORY;
2252         }
2253         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2254         if (!GET_CORE_IF(pcd)->core_params->opt) {
2255                 if (ep->dwc_ep.num != 0) {
2256                         DWC_ERROR("queue req %p, len %d buf %p\n",
2257                                   req_handle, buflen, buf);
2258                 }
2259         }
2260
2261         req->buf = buf;
2262         req->dma = dma_buf;
2263         req->length = buflen;
2264         req->sent_zlp = zero;
2265         req->priv = req_handle;
2266         req->dw_align_buf = NULL;
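        /*
         * In buffer-DMA mode the core requires dword-aligned buffers; if the
         * caller's DMA address is misaligned, bounce the transfer through a
         * locally allocated, aligned buffer instead.
         */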
2267         if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2268             && !GET_CORE_IF(pcd)->dma_desc_enable)
2269                 req->dw_align_buf = DWC_DEV_DMA_ALLOC_ATOMIC(buflen,
2270                                                              &req->
2271                                                              dw_align_buf_dma);
2272         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2273
2274         /*
2275          * After adding the request to the queue: for IN ISOC, wait for the
2276          * "IN Token Received When TxFIFO Empty" interrupt; for OUT ISOC, wait
2277          * for the "OUT Token Received When EP Disabled" interrupt to obtain
2278          * the starting (odd/even) microframe, then start the transfer.
2279          */
2280         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2281                 if (req != 0) {
2282                         depctl_data_t depctl = {.d32 =
2283                                     DWC_READ_REG32(&pcd->core_if->
2284                                                    dev_if->in_ep_regs[ep->
2285                                                                       dwc_ep.
2286                                                                       num]->diepctl)
2287                         };
2288                         ++pcd->request_pending;
2289
2290                         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2291                         if (ep->dwc_ep.is_in) {
2292                                 depctl.b.cnak = 1;
2293                                 DWC_WRITE_REG32(&pcd->core_if->
2294                                                 dev_if->in_ep_regs[ep->dwc_ep.
2295                                                                    num]->diepctl,
2296                                                 depctl.d32);
2297                         }
2298
2299                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2300                 }
2301                 return 0;
2302         }
2303
2304         /*
2305          * For EP0 IN without premature status, zlp is required?
2306          */
2307          * For EP0 IN without a premature status phase, is a ZLP required?
2308                 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2309                 /* _req->zero = 1; */
2310         }
2311
2312         /* Start the transfer */
2313         if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2314                 /* EP0 Transfer? */
2315                 if (ep->dwc_ep.num == 0) {
2316                         switch (pcd->ep0state) {
2317                         case EP0_IN_DATA_PHASE:
2318                                 DWC_DEBUGPL(DBG_PCD,
2319                                             "%s ep0: EP0_IN_DATA_PHASE\n",
2320                                             __func__);
2321                                 break;
2322
2323                         case EP0_OUT_DATA_PHASE:
2324                                 DWC_DEBUGPL(DBG_PCD,
2325                                             "%s ep0: EP0_OUT_DATA_PHASE\n",
2326                                             __func__);
2327                                 if (pcd->request_config) {
2328                                         /* Complete STATUS PHASE */
2329                                         ep->dwc_ep.is_in = 1;
2330                                         pcd->ep0state = EP0_IN_STATUS_PHASE;
2331                                 }
2332                                 break;
2333
2334                         case EP0_IN_STATUS_PHASE:
2335                                 DWC_DEBUGPL(DBG_PCD,
2336                                             "%s ep0: EP0_IN_STATUS_PHASE\n",
2337                                             __func__);
2338                                 break;
2339
2340                         default:
2341                                 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2342                                             pcd->ep0state);
2343                                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2344                                 return -DWC_E_SHUTDOWN;
2345                         }
2346
2347                         ep->dwc_ep.dma_addr = dma_buf;
2348                         ep->dwc_ep.start_xfer_buff = buf;
2349                         ep->dwc_ep.xfer_buff = buf;
2350                         ep->dwc_ep.xfer_len = buflen;
2351                         ep->dwc_ep.xfer_count = 0;
2352                         ep->dwc_ep.sent_zlp = 0;
2353                         ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
2354
2355                         if (zero) {
2356                                 if ((ep->dwc_ep.xfer_len %
2357                                      ep->dwc_ep.maxpacket == 0)
2358                                     && (ep->dwc_ep.xfer_len != 0)) {
2359                                         ep->dwc_ep.sent_zlp = 1;
2360                                 }
2361
2362                         }
2363
2364                         dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2365                                                    &ep->dwc_ep);
2366                 }               /* non-ep0 endpoints */
2367                 else {
2368 #ifdef DWC_UTE_CFI
2369                         if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2370                                 /* store the request length */
2371                                 ep->dwc_ep.cfi_req_len = buflen;
2372                                 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2373                                                                 ep, req);
2374                         } else {
2375 #endif
2376                                 max_transfer =
2377                                     GET_CORE_IF(ep->pcd)->
2378                                     core_params->max_transfer_size;
2379
2380                                 /* Setup and start the Transfer */
2381                                 if (req->dw_align_buf) {
2382                                         if (ep->dwc_ep.is_in)
2383                                                 dwc_memcpy(req->dw_align_buf,
2384                                                            buf, buflen);
2385                                         ep->dwc_ep.dma_addr =
2386                                             req->dw_align_buf_dma;
2387                                         ep->dwc_ep.start_xfer_buff =
2388                                             req->dw_align_buf;
2389                                         ep->dwc_ep.xfer_buff =
2390                                             req->dw_align_buf;
2391                                 } else {
2392                                         ep->dwc_ep.dma_addr = dma_buf;
2393                                         ep->dwc_ep.start_xfer_buff = buf;
2394                                         ep->dwc_ep.xfer_buff = buf;
2395                                 }
2396                                 ep->dwc_ep.xfer_len = 0;
2397                                 ep->dwc_ep.xfer_count = 0;
2398                                 ep->dwc_ep.sent_zlp = 0;
2399                                 ep->dwc_ep.total_len = buflen;
2400
2401                                 ep->dwc_ep.maxxfer = max_transfer;
2402                                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2403                                         uint32_t out_max_xfer =
2404                                             DDMA_MAX_TRANSFER_SIZE -
2405                                             (DDMA_MAX_TRANSFER_SIZE % 4);
2406                                         if (ep->dwc_ep.is_in) {
2407                                                 if (ep->dwc_ep.maxxfer >
2408                                                     DDMA_MAX_TRANSFER_SIZE) {
2409                                                         ep->dwc_ep.maxxfer =
2410                                                             DDMA_MAX_TRANSFER_SIZE;
2411                                                 }
2412                                         } else {
2413                                                 if (ep->dwc_ep.maxxfer >
2414                                                     out_max_xfer) {
2415                                                         ep->dwc_ep.maxxfer =
2416                                                             out_max_xfer;
2417                                                 }
2418                                         }
2419                                 }
2420                                 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2421                                         ep->dwc_ep.maxxfer -=
2422                                             (ep->dwc_ep.maxxfer %
2423                                              ep->dwc_ep.maxpacket);
2424                                 }
2425
2426                                 if (zero) {
2427                                         if ((ep->dwc_ep.total_len %
2428                                              ep->dwc_ep.maxpacket == 0)
2429                                             && (ep->dwc_ep.total_len != 0)) {
2430                                                 ep->dwc_ep.sent_zlp = 1;
2431                                         }
2432                                 }
2433 #ifdef DWC_UTE_CFI
2434                         }
2435 #endif
2436                         dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2437                                                   &ep->dwc_ep);
2438                 }
2439         }
2440
2441         if (req != 0) {
2442                 ++pcd->request_pending;
2443                 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2444                 if (ep->dwc_ep.is_in && ep->stopped
2445                     && !(GET_CORE_IF(pcd)->dma_enable)) {
2446                         /** @todo NGS Create a function for this. */
2447                         diepmsk_data_t diepmsk = {.d32 = 0 };
2448                         diepmsk.b.intktxfemp = 1;
2449                         if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2450                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2451                                                  dev_global_regs->
2452                                                  diepeachintmsk[ep->dwc_ep.num],
2453                                                  0, diepmsk.d32);
2454                         } else {
2455                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2456                                                  dev_global_regs->diepmsk, 0,
2457                                                  diepmsk.d32);
2458                         }
2459
2460                 }
2461         }
2462         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2463
2464         return 0;
2465 }
2466
2467 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t *pcd, void *ep_handle,
2468                            void *req_handle)
2469 {
2470         dwc_irqflags_t flags;
2471         dwc_otg_pcd_request_t *req;
2472         dwc_otg_pcd_ep_t *ep;
2473
2474         ep = get_ep_from_handle(pcd, ep_handle);
2475         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2476                 DWC_WARN("bad argument\n");
2477                 return -DWC_E_INVALID;
2478         }
2479
2480         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2481
2482         /* make sure it's actually queued on this endpoint */
2483         DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2484                 if (req->priv == (void *)req_handle) {
2485                         break;
2486                 }
2487         }
2488
2489         if (req->priv != (void *)req_handle) {
2490                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2491                 return -DWC_E_INVALID;
2492         }
2493
2494         if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2495                 dwc_otg_pcd_ep_stop_transfer(GET_CORE_IF(pcd),
2496                                              &ep->dwc_ep);
2497                 /* Flush the Tx FIFO */
2498                 if (ep->dwc_ep.is_in) {
2499                         dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
2500                                               ep->dwc_ep.tx_fifo_num);
2501                         release_perio_tx_fifo(GET_CORE_IF(pcd),
2502                                               ep->dwc_ep.tx_fifo_num);
2503                         release_tx_fifo(GET_CORE_IF(pcd),
2504                                         ep->dwc_ep.tx_fifo_num);
2505                 }
2506
2507                 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2508         } else {
2509                 req = NULL;
2510         }
2511
2512         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2513
2514         return req ? 0 : -DWC_E_SHUTDOWN;
2515
2516 }
2517
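/**
 * Set or clear the halt (STALL) condition on an endpoint.
 * value: 0 - clear halt, 1 - set halt, 2 - clear the stall_clear_flag,
 *        3 - set the stall_clear_flag and then halt the endpoint.
 */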
2518 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t *pcd, void *ep_handle, int value)
2519 {
2520         dwc_otg_pcd_ep_t *ep;
2521         dwc_irqflags_t flags;
2522         int retval = 0;
2523
2524         ep = get_ep_from_handle(pcd, ep_handle);
2525
2526         if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2527             (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2528                 DWC_WARN("%s, bad ep\n", __func__);
2529                 return -DWC_E_INVALID;
2530         }
2531
2532         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2533         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2534                 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2535                          ep->dwc_ep.is_in ? "IN" : "OUT");
2536                 retval = -DWC_E_AGAIN;
2537         } else if (value == 0) {
2538                 ep->dwc_ep.stall_clear_flag = 0;
2539                 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2540         } else if (value == 1) {
2541 stall:
2542                 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2543                         dtxfsts_data_t txstatus;
2544                         fifosize_data_t txfifosize;
2545
2546                         txfifosize.d32 =
2547                             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
2548                                            dtxfsiz[ep->dwc_ep.tx_fifo_num]);
2549                         txstatus.d32 =
2550                             DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
2551                                            in_ep_regs[ep->dwc_ep.num]->dtxfsts);
2552
2553                         if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2554                                 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2555                                 retval = -DWC_E_AGAIN;
2556                         } else {
2557                                 if (ep->dwc_ep.num == 0) {
2558                                         pcd->ep0state = EP0_STALL;
2559                                 }
2560
2561                                 ep->stopped = 1;
2562                                 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2563                                                      &ep->dwc_ep);
2564                         }
2565                 } else {
2566                         if (ep->dwc_ep.num == 0) {
2567                                 pcd->ep0state = EP0_STALL;
2568                         }
2569
2570                         ep->stopped = 1;
2571                         dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2572                 }
2573         } else if (value == 2) {
2574                 ep->dwc_ep.stall_clear_flag = 0;
2575         } else if (value == 3) {
2576                 ep->dwc_ep.stall_clear_flag = 1;
2577                 goto stall;
2578         }
2579
2580         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2581
2582         return retval;
2583 }
2584
2585 /**
2586  * This function initiates remote wakeup of the host from suspend state.
2587  */
2588 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t *pcd, int set)
2589 {
2590         dctl_data_t dctl = { 0 };
2591         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2592         dsts_data_t dsts;
2593
2594         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
2595         if (!dsts.b.suspsts) {
2596                 DWC_WARN("Remote wakeup while not in suspend state\n");
2597         }
2598         /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2599         if (pcd->remote_wakeup_enable) {
2600                 if (set) {
2601
2602                         if (core_if->adp_enable) {
2603                                 gpwrdn_data_t gpwrdn;
2604
2605                                 dwc_otg_adp_probe_stop(core_if);
2606
2607                                 /* Mask SRP detected interrupt from Power Down Logic */
2608                                 gpwrdn.d32 = 0;
2609                                 gpwrdn.b.srp_det_msk = 1;
2610                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
2611                                                  gpwrdn, gpwrdn.d32, 0);
2612
2613                                 /* Disable Power Down Logic */
2614                                 gpwrdn.d32 = 0;
2615                                 gpwrdn.b.pmuactv = 1;
2616                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
2617                                                  gpwrdn, gpwrdn.d32, 0);
2618
2619                                 /*
2620                                  * Initialize the Core for Device mode.
2621                                  */
2622                                 core_if->op_state = B_PERIPHERAL;
2623                                 dwc_otg_core_init(core_if);
2624                                 dwc_otg_enable_global_interrupts(core_if);
2625                                 cil_pcd_start(core_if);
2626
2627                                 dwc_otg_initiate_srp(core_if);
2628                         }
2629
2630                         dctl.b.rmtwkupsig = 1;
2631                         DWC_MODIFY_REG32(&core_if->dev_if->
2632                                          dev_global_regs->dctl, 0, dctl.d32);
2633                         DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2634
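                        /* Drive resume signaling for ~2 ms (USB 2.0 permits
                         * 1-15 ms) before deasserting RmtWkUpSig below. */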
2635                         dwc_mdelay(2);
2636                         DWC_MODIFY_REG32(&core_if->dev_if->
2637                                          dev_global_regs->dctl, dctl.d32, 0);
2638                         DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2639                 }
2640         } else {
2641                 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2642         }
2643 }
2644
2645 #ifdef CONFIG_USB_DWC_OTG_LPM
2646 /**
2647  * This function initiates remote wakeup of the host from L1 sleep state.
2648  */
2649 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t *pcd, int set)
2650 {
2651         glpmcfg_data_t lpmcfg;
2652         pcgcctl_data_t pcgcctl = {.d32 = 0 };
2653
2654         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2655
2656         lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2657
2658         /* Check if we are in L1 state */
2659         if (!lpmcfg.b.prt_sleep_sts) {
2660                 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
2661                 return;
2662         }
2663
2664         /* Check if host allows remote wakeup */
2665         if (!lpmcfg.b.rem_wkup_en) {
2666                 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
2667                 return;
2668         }
2669
2670         /* Check if Resume OK */
2671         if (!lpmcfg.b.sleep_state_resumeok) {
2672                 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
2673                 return;
2674         }
2675
2676         lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2677         lpmcfg.b.en_utmi_sleep = 0;
2678         lpmcfg.b.hird_thres &= (~(1 << 4));
2679
2680         /* Clear Enbl_L1Gating bit. */
2681         pcgcctl.b.enbl_sleep_gating = 1;
2682         DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
2683
2684         DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
2685
2686         if (set) {
2687                 dctl_data_t dctl = {.d32 = 0 };
2688                 dctl.b.rmtwkupsig = 1;
2689                 /* Set RmtWkUpSig bit to start remote wakeup signaling.
2690                  * Hardware will automatically clear this bit.
2691                  */
2692                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2693                                  0, dctl.d32);
2694                 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2695         }
2696
2697 }
2698 #endif
2699
2700 /**
2701  * Performs remote wakeup.
2702  */
2703 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
2704 {
2705         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2706         dwc_irqflags_t flags;
2707         if (dwc_otg_is_device_mode(core_if)) {
2708                 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2709 #ifdef CONFIG_USB_DWC_OTG_LPM
2710                 if (core_if->lx_state == DWC_OTG_L1) {
2711                         dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2712                 } else {
2713 #endif
2714                         dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2715 #ifdef CONFIG_USB_DWC_OTG_LPM
2716                 }
2717 #endif
2718                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2719         }
2720         return;
2721 }
2722
2723 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t *pcd, int no_of_usecs)
2724 {
2725         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2726         dctl_data_t dctl = { 0 };
2727
2728         if (dwc_otg_is_device_mode(core_if)) {
2729                 dctl.b.sftdiscon = 1;
2730                 DWC_PRINTF("Soft disconnect for %d microseconds\n", no_of_usecs);
2731                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
2732                                  dctl.d32);
2733                 dwc_udelay(no_of_usecs);
2734                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2735                                  dctl.d32, 0);
2736
2737         } else {
2738                 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
2739         }
2740         return;
2741
2742 }
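
/*
 * Note (general USB behaviour, not specific to this file): a short
 * SftDiscon pulse like the one above is typically used to make the host
 * treat the device as unplugged and re-enumerate it; choosing a delay long
 * enough for the host to notice the disconnect is left to the caller.
 */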
2743
2744 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t *pcd)
2745 {
2746         dsts_data_t dsts;
2747         gotgctl_data_t gotgctl;
2748
2749         /*
2750          * This function starts the Session Request Protocol (SRP) if no
2751          * session is in progress.  If a session is already in progress but
2752          * the device is suspended, remote wakeup signaling is started instead.
2753          */
2754
2755         /* Check if valid session */
2756         gotgctl.d32 =
2757             DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2758         if (gotgctl.b.bsesvld) {
2759                 /* Check if suspend state */
2760                 dsts.d32 =
2761                     DWC_READ_REG32(&
2762                                    (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
2763                                     dsts));
2764                 if (dsts.b.suspsts) {
2765                         dwc_otg_pcd_remote_wakeup(pcd, 1);
2766                 }
2767         } else {
2768                 dwc_otg_pcd_initiate_srp(pcd);
2769         }
2770
2771         return 0;
2772
2773 }
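
/*
 * Illustrative sketch only (an assumption about the OS glue, not code from
 * this file): the Linux wrapper in dwc_otg_pcd_linux.c is expected to route
 * the gadget framework's wakeup request into dwc_otg_pcd_wakeup(), roughly:
 *
 *     static int wakeup(struct usb_gadget *gadget)
 *     {
 *             struct gadget_wrapper *d;
 *
 *             d = container_of(gadget, struct gadget_wrapper, gadget);
 *             return dwc_otg_pcd_wakeup(d->pcd);
 *     }
 *
 * The wrapper struct and field names are placeholders for whatever the glue
 * layer actually defines.
 */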
2774
2775 /**
2776  * Implement the Soft-Connect and Soft-Disconnect functions (DCTL.SftDiscon).
2777  */
2778
2779 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t *pcd)
2780 {
2781         if (pcd)
2782                 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),
2783                                  2, 0); /* clear SftDiscon: attach D+ pull-up */
2784 }
2785
2786 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t *pcd)
2787 {
2788         if (pcd)
2789                 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),
2790                                  0, 2); /* set SftDiscon: soft disconnect */
2791 }
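
/*
 * The literal masks above rely on SftDiscon being bit 1 of DCTL, which is
 * what the constant 2 encodes.  A more self-documenting equivalent using the
 * existing bit-field union would be (sketch only, no behavioural change):
 *
 *     dctl_data_t dctl = {.d32 = 0 };
 *     dctl.b.sftdiscon = 1;
 *     DWC_MODIFY_REG32(dctl_reg, dctl.d32, 0);    clear SftDiscon -> connect
 *     DWC_MODIFY_REG32(dctl_reg, 0, dctl.d32);    set SftDiscon -> disconnect
 *
 * where dctl_reg stands for &GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl.
 */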
2792
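/**
 * Soft-reset helper: re-initialize the core, the PCD state and the
 * device-mode registers with global interrupts masked for the whole
 * sequence; they are re-enabled only once the core has been reprogrammed.
 */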
2793 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2794 {
2795         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2796         dwc_otg_disable_global_interrupts(core_if);
2797         dwc_otg_core_init(core_if);
2798         dwc_otg_pcd_reinit(pcd);
2799         dwc_otg_core_dev_init(core_if);
2800         dwc_otg_enable_global_interrupts(core_if);
2801 }
2802
2803 /**
2804  * Start the SRP timer to detect when the SRP does not complete within
2805  * 6 seconds.
2806  *
2807  * @param pcd the pcd structure.
2808  */
2809 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
2810 {
2811         dwc_irqflags_t flags;
2812         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2813         dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2814         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2815 }
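
/*
 * Note (assumption based on the OTG programming model, not verified against
 * the CIL sources here): dwc_otg_initiate_srp() is expected to request a
 * session by setting GOTGCTL.SesReq and to arm the 6 second SRP timeout
 * mentioned above; the spinlock keeps that request atomic with respect to
 * the PCD interrupt handlers.
 */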
2816
2817 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t *pcd)
2818 {
2819         return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
2820 }
2821
2822 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t *pcd)
2823 {
2824         return GET_CORE_IF(pcd)->core_params->lpm_enable;
2825 }
2826
2827 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t *pcd)
2828 {
2829         return GET_CORE_IF(pcd)->core_params->besl_enable;
2830 }
2831
2832 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t *pcd)
2833 {
2834         return GET_CORE_IF(pcd)->core_params->baseline_besl;
2835 }
2836
2837 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t *pcd)
2838 {
2839         return GET_CORE_IF(pcd)->core_params->deep_besl;
2840 }
2841
2842 uint32_t get_b_hnp_enable(dwc_otg_pcd_t *pcd)
2843 {
2844         return pcd->b_hnp_enable;
2845 }
2846
2847 uint32_t get_a_hnp_support(dwc_otg_pcd_t *pcd)
2848 {
2849         return pcd->a_hnp_support;
2850 }
2851
2852 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t *pcd)
2853 {
2854         return pcd->a_alt_hnp_support;
2855 }
2856
2857 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t *pcd)
2858 {
2859         return pcd->remote_wakeup_enable;
2860 }
2861
2862 #endif /* DWC_HOST_ONLY */