USB: fix Coding Style.
[firefly-linux-kernel-4.4.55.git] drivers/usb/dwc_otg_310/dwc_otg_pcd.c
1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
3  * $Revision: #104 $
4  * $Date: 2012/12/21 $
5  * $Change: 2131568 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34
/** @file
 * This file implements the PCD Core. All code in this file is portable and
 * doesn't use any OS-specific functions.
 * The PCD Core provides the interface defined in the
 * <code><dwc_otg_pcd_if.h></code> header file, which can be used to
 * implement an OS-specific PCD interface.
 *
 * An important function of the PCD is managing interrupts generated
 * by the DWC_otg controller. The implementation of the DWC_otg device
 * mode interrupt service routines is in dwc_otg_pcd_intr.c.
 *
 * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
 * @todo Does it work when the request size is greater than the DEPTSIZ
 * transfer size?
 *
 */
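
/*
 * Usage sketch (illustrative only, not part of the driver): an OS-specific
 * glue layer would typically allocate the PCD with dwc_otg_pcd_init(),
 * register its function ops with dwc_otg_pcd_start(), and tear everything
 * down again with dwc_otg_pcd_remove(). The callback names below are
 * hypothetical placeholders for the OS layer's own handlers:
 *
 *        static const struct dwc_otg_pcd_function_ops my_fops = {
 *                .complete = my_complete_handler,
 *                .suspend  = my_suspend_handler,
 *                .resume   = my_resume_handler,
 *        };
 *
 *        pcd = dwc_otg_pcd_init(core_if);
 *        if (pcd)
 *                dwc_otg_pcd_start(pcd, &my_fops);
 */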
50
51 #include "dwc_otg_pcd.h"
52
53 #ifdef DWC_UTE_CFI
54 #include "dwc_otg_cfi.h"
55
56 extern int init_cfi(cfiobject_t *cfiobj);
57 #endif
58
59 /**
60  * Choose endpoint from ep arrays using usb_ep structure.
61  */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t *pcd, void *handle)
63 {
64         int i;
65         if (pcd->ep0.priv == handle) {
66                 return &pcd->ep0;
67         }
68         for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69                 if (pcd->in_ep[i].priv == handle)
70                         return &pcd->in_ep[i];
71                 if (pcd->out_ep[i].priv == handle)
72                         return &pcd->out_ep[i];
73         }
74
75         return NULL;
76 }
77
/**
 * This function completes a request. It calls the request callback.
 */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req,
82                           int32_t status)
83 {
84         unsigned stopped = ep->stopped;
85
86         DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87         DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
88
89         /* don't modify queue heads during completion callback */
90         ep->stopped = 1;
91         /* spin_unlock/spin_lock now done in fops->complete() */
92         ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
93                                 req->actual);
94
95         if (ep->pcd->request_pending > 0) {
96                 --ep->pcd->request_pending;
97         }
98
99         ep->stopped = stopped;
100         DWC_FREE(req);
101 }
102
/**
 * This function terminates all the requests in the EP request queue.
 */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t *ep)
107 {
108         dwc_otg_pcd_request_t *req;
109
110         ep->stopped = 1;
111
112         /* called with irqs blocked?? */
113         while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115                 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
116         }
117 }
118
119 void dwc_otg_pcd_start(dwc_otg_pcd_t *pcd,
120                        const struct dwc_otg_pcd_function_ops *fops)
121 {
122         pcd->fops = fops;
123 }
124
125 /**
126  * PCD Callback function for initializing the PCD when switching to
127  * device mode.
128  *
129  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
130  */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
132 {
133         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
135
        /*
         * Initialize the Core for Device mode.
         */
139         if (dwc_otg_is_device_mode(core_if)) {
140                 /* dwc_otg_core_dev_init(core_if); */
141                 /* Set core_if's lock pointer to the pcd->lock */
142                 core_if->lock = pcd->lock;
143         }
144         return 1;
145 }
146
147 /** CFI-specific buffer allocation function for EP */
148 #ifdef DWC_UTE_CFI
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
150                               size_t buflen, int flags)
151 {
152         dwc_otg_pcd_ep_t *ep;
153         ep = get_ep_from_handle(pcd, pep);
154         if (!ep) {
155                 DWC_WARN("bad ep\n");
156                 return -DWC_E_INVALID;
157         }
158
159         return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
160                                           flags);
161 }
162 #else
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t *pcd, void *pep, dwc_dma_t *addr,
164                               size_t buflen, int flags);
165 #endif
166
167 /**
168  * PCD Callback function for notifying the PCD when resuming from
169  * suspend.
170  *
171  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
172  */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
174 {
175         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
176
177         if (pcd->fops->resume) {
178                 pcd->fops->resume(pcd);
179         }
180
181         /* Stop the SRP timeout timer. */
182         if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183             || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184                 if (GET_CORE_IF(pcd)->srp_timer_started) {
185                         GET_CORE_IF(pcd)->srp_timer_started = 0;
186                         DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
187                 }
188         }
189         return 1;
190 }
191
192 /**
193  * PCD Callback function for notifying the PCD device is suspended.
194  *
195  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
196  */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
198 {
199         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
200
201         if (pcd->fops->suspend) {
202                 DWC_SPINUNLOCK(pcd->lock);
203                 pcd->fops->suspend(pcd);
204                 DWC_SPINLOCK(pcd->lock);
205         }
206
207         return 1;
208 }
209
210 /**
211  * PCD Callback function for stopping the PCD when switching to Host
212  * mode.
213  *
214  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
215  */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
217 {
218         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219         extern void dwc_otg_pcd_stop(dwc_otg_pcd_t *_pcd);
220
221         dwc_otg_pcd_stop(pcd);
222         return 1;
223 }
224
225 /**
226  * PCD Callback structure for handling mode switching.
227  */
228 static dwc_otg_cil_callbacks_t pcd_callbacks = {
229         .start = dwc_otg_pcd_start_cb,
230         .stop = dwc_otg_pcd_stop_cb,
231         .suspend = dwc_otg_pcd_suspend_cb,
232         .resume_wakeup = dwc_otg_pcd_resume_cb,
233         .p = 0,                 /* Set at registration */
234 };
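
/*
 * These callbacks are registered with the CIL by
 * dwc_otg_cil_register_pcd_callbacks() in dwc_otg_pcd_init(); the .p member
 * is filled in with the pcd pointer at registration time.
 */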
235
236 /**
237  * This function allocates a DMA Descriptor chain for the Endpoint
238  * buffer to be used for a transfer to/from the specified endpoint.
239  */
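/*
 * Note: the chain is returned as a CPU pointer, while the matching bus/DMA
 * address is passed back through dma_desc_addr; both, along with the same
 * count, are needed later by dwc_otg_ep_free_desc_chain().
 */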
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t *dma_desc_addr,
241                                                     uint32_t count)
242 {
243         return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
244                                     dma_desc_addr);
245 }
246
/**
 * This function frees a DMA Descriptor chain that was allocated by
 * dwc_otg_ep_alloc_desc_chain().
 */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t *desc_addr,
251                                 uint32_t dma_desc_addr, uint32_t count)
252 {
253         DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
254                      dma_desc_addr);
255 }
256
257 #ifdef DWC_EN_ISOC
258
259 /**
260  * This function initializes a descriptor chain for Isochronous transfer
261  *
262  * @param core_if Programming view of DWC_otg controller.
263  * @param dwc_ep The EP to start the transfer on.
264  *
265  */
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t *core_if,
267                                         dwc_ep_t *dwc_ep)
268 {
269
270         dsts_data_t dsts = {.d32 = 0 };
271         depctl_data_t depctl = {.d32 = 0 };
272         volatile uint32_t *addr;
273         int i, j;
274         uint32_t len;
275
276         if (dwc_ep->is_in)
277                 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
278         else
279                 dwc_ep->desc_cnt =
280                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
281                     dwc_ep->bInterval;
282
283         /** Allocate descriptors for double buffering */
284         dwc_ep->iso_desc_addr =
285             dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286                                         dwc_ep->desc_cnt * 2);
        if (!dwc_ep->iso_desc_addr) {
288                 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
289                 return;
290         }
291
292         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
293
294         /** ISO OUT EP */
295         if (dwc_ep->is_in == 0) {
296                 dev_dma_desc_sts_t sts = {.d32 = 0 };
297                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
298                 dma_addr_t dma_ad;
299                 uint32_t data_per_desc;
300                 dwc_otg_dev_out_ep_regs_t *out_regs =
301                     core_if->dev_if->out_ep_regs[dwc_ep->num];
302                 int offset;
303
304                 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305                 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
306
307                 /** Buffer 0 descriptors setup */
308                 dma_ad = dwc_ep->dma_addr0;
309
310                 sts.b_iso_out.bs = BS_HOST_READY;
311                 sts.b_iso_out.rxsts = 0;
312                 sts.b_iso_out.l = 0;
313                 sts.b_iso_out.sp = 0;
314                 sts.b_iso_out.ioc = 0;
315                 sts.b_iso_out.pid = 0;
316                 sts.b_iso_out.framenum = 0;
317
318                 offset = 0;
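                /*
                 * One descriptor per packet: each descriptor carries a full
                 * maxpacket, except the last packet of a frame, which carries
                 * the remainder of data_per_frame rounded up to a 4-byte
                 * boundary.
                 */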
319                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320                      i += dwc_ep->pkt_per_frm) {
321
322                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323                                 uint32_t len = (j + 1)*dwc_ep->maxpacket;
324                                 if (len > dwc_ep->data_per_frame)
325                                         data_per_desc =
326                                             dwc_ep->data_per_frame -
327                                             j*dwc_ep->maxpacket;
328                                 else
329                                         data_per_desc = dwc_ep->maxpacket;
330                                 len = data_per_desc % 4;
331                                 if (len)
332                                         data_per_desc += 4 - len;
333
334                                 sts.b_iso_out.rxbytes = data_per_desc;
335                                 dma_desc->buf = dma_ad;
336                                 dma_desc->status.d32 = sts.d32;
337
338                                 offset += data_per_desc;
339                                 dma_desc++;
340                                 dma_ad += data_per_desc;
341                         }
342                 }
343
344                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345                         uint32_t len = (j + 1)*dwc_ep->maxpacket;
346                         if (len > dwc_ep->data_per_frame)
347                                 data_per_desc =
348                                     dwc_ep->data_per_frame -
349                                     j*dwc_ep->maxpacket;
350                         else
351                                 data_per_desc = dwc_ep->maxpacket;
352                         len = data_per_desc % 4;
353                         if (len)
354                                 data_per_desc += 4 - len;
355                         sts.b_iso_out.rxbytes = data_per_desc;
356                         dma_desc->buf = dma_ad;
357                         dma_desc->status.d32 = sts.d32;
358
359                         offset += data_per_desc;
360                         dma_desc++;
361                         dma_ad += data_per_desc;
362                 }
363
364                 sts.b_iso_out.ioc = 1;
365                 len = (j + 1)*dwc_ep->maxpacket;
366                 if (len > dwc_ep->data_per_frame)
367                         data_per_desc =
368                             dwc_ep->data_per_frame - j*dwc_ep->maxpacket;
369                 else
370                         data_per_desc = dwc_ep->maxpacket;
371                 len = data_per_desc % 4;
372                 if (len)
373                         data_per_desc += 4 - len;
374                 sts.b_iso_out.rxbytes = data_per_desc;
375
376                 dma_desc->buf = dma_ad;
377                 dma_desc->status.d32 = sts.d32;
378                 dma_desc++;
379
380                 /** Buffer 1 descriptors setup */
381                 sts.b_iso_out.ioc = 0;
382                 dma_ad = dwc_ep->dma_addr1;
383
384                 offset = 0;
385                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386                      i += dwc_ep->pkt_per_frm) {
387                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388                                 uint32_t len = (j + 1)*dwc_ep->maxpacket;
389                                 if (len > dwc_ep->data_per_frame)
390                                         data_per_desc =
391                                             dwc_ep->data_per_frame -
392                                             j*dwc_ep->maxpacket;
393                                 else
394                                         data_per_desc = dwc_ep->maxpacket;
395                                 len = data_per_desc % 4;
396                                 if (len)
397                                         data_per_desc += 4 - len;
398
                                sts.b_iso_out.rxbytes = data_per_desc;
401                                 dma_desc->buf = dma_ad;
402                                 dma_desc->status.d32 = sts.d32;
403
404                                 offset += data_per_desc;
405                                 dma_desc++;
406                                 dma_ad += data_per_desc;
407                         }
408                 }
409                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
410                         data_per_desc =
411                             ((j + 1)*dwc_ep->maxpacket >
412                              dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413                             j*dwc_ep->maxpacket : dwc_ep->maxpacket;
414                         data_per_desc +=
415                             (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416                         sts.b_iso_out.rxbytes = data_per_desc;
417                         dma_desc->buf = dma_ad;
418                         dma_desc->status.d32 = sts.d32;
419
420                         offset += data_per_desc;
421                         dma_desc++;
422                         dma_ad += data_per_desc;
423                 }
424
425                 sts.b_iso_out.ioc = 1;
426                 sts.b_iso_out.l = 1;
427                 data_per_desc =
428                     ((j + 1)*dwc_ep->maxpacket >
429                      dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430                     j*dwc_ep->maxpacket : dwc_ep->maxpacket;
431                 data_per_desc +=
432                     (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433                 sts.b_iso_out.rxbytes = data_per_desc;
434
435                 dma_desc->buf = dma_ad;
436                 dma_desc->status.d32 = sts.d32;
437
438                 dwc_ep->next_frame = 0;
439
440                 /** Write dma_ad into DOEPDMA register */
441                 DWC_WRITE_REG32(&(out_regs->doepdma),
442                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
443
444         }
445         /** ISO IN EP */
446         else {
447                 dev_dma_desc_sts_t sts = {.d32 = 0 };
448                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
449                 dma_addr_t dma_ad;
450                 dwc_otg_dev_in_ep_regs_t *in_regs =
451                     core_if->dev_if->in_ep_regs[dwc_ep->num];
452                 unsigned int frmnumber;
453                 fifosize_data_t txfifosize, rxfifosize;
454
455                 txfifosize.d32 =
456                     DWC_READ_REG32(&core_if->dev_if->
457                                    in_ep_regs[dwc_ep->num]->dtxfsts);
458                 rxfifosize.d32 =
459                     DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
460
461                 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
462
463                 dma_ad = dwc_ep->dma_addr0;
464
465                 dsts.d32 =
466                     DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
467
468                 sts.b_iso_in.bs = BS_HOST_READY;
469                 sts.b_iso_in.txsts = 0;
470                 sts.b_iso_in.sp =
471                     (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472                 sts.b_iso_in.ioc = 0;
473                 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
474
475                 frmnumber = dwc_ep->next_frame;
476
477                 sts.b_iso_in.framenum = frmnumber;
478                 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
479                 sts.b_iso_in.l = 0;
480
481                 /** Buffer 0 descriptors setup */
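                /*
                 * One descriptor per frame: each descriptor sends
                 * data_per_frame bytes and the target frame number advances
                 * by bInterval, so consecutive service intervals are covered.
                 */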
482                 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483                         dma_desc->buf = dma_ad;
484                         dma_desc->status.d32 = sts.d32;
485                         dma_desc++;
486
487                         dma_ad += dwc_ep->data_per_frame;
488                         sts.b_iso_in.framenum += dwc_ep->bInterval;
489                 }
490
491                 sts.b_iso_in.ioc = 1;
492                 dma_desc->buf = dma_ad;
493                 dma_desc->status.d32 = sts.d32;
494                 ++dma_desc;
495
496                 /** Buffer 1 descriptors setup */
497                 sts.b_iso_in.ioc = 0;
498                 dma_ad = dwc_ep->dma_addr1;
499
500                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501                      i += dwc_ep->pkt_per_frm) {
502                         dma_desc->buf = dma_ad;
503                         dma_desc->status.d32 = sts.d32;
504                         dma_desc++;
505
506                         dma_ad += dwc_ep->data_per_frame;
507                         sts.b_iso_in.framenum += dwc_ep->bInterval;
508
509                         sts.b_iso_in.ioc = 0;
510                 }
511                 sts.b_iso_in.ioc = 1;
512                 sts.b_iso_in.l = 1;
513
514                 dma_desc->buf = dma_ad;
515                 dma_desc->status.d32 = sts.d32;
516
517                 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
518
519                 /** Write dma_ad into diepdma register */
520                 DWC_WRITE_REG32(&(in_regs->diepdma),
521                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
522         }
523         /** Enable endpoint, clear nak  */
524         depctl.d32 = 0;
525         depctl.b.epena = 1;
526         depctl.b.usbactep = 1;
527         depctl.b.cnak = 1;
528
529         DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530         depctl.d32 = DWC_READ_REG32(addr);
531 }
532
533 /**
 * This function sets up and starts an Isochronous transfer in Buffer DMA mode
535  *
536  * @param core_if Programming view of DWC_otg controller.
537  * @param ep The EP to start the transfer on.
538  *
539  */
540 void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t *core_if,
541                                        dwc_ep_t *ep)
542 {
543         depctl_data_t depctl = {.d32 = 0 };
544         volatile uint32_t *addr;
545
546         if (ep->is_in) {
547                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
548         } else {
549                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
550         }
551
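        /*
         * This path is for Buffer DMA only: bail out unless DMA is enabled
         * and descriptor DMA is disabled.
         */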
552         if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
553                 return;
554         } else {
555                 deptsiz_data_t deptsiz = {.d32 = 0 };
556
557                 ep->xfer_len =
558                     ep->data_per_frame*ep->buf_proc_intrvl / ep->bInterval;
559                 ep->pkt_cnt =
560                     (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
561                 ep->xfer_count = 0;
562                 ep->xfer_buff =
563                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
564                 ep->dma_addr =
565                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
566
567                 if (ep->is_in) {
568                         /* Program the transfer size and packet count
569                          *      as follows: xfersize = N * maxpacket +
570                          *      short_packet pktcnt = N + (short_packet
571                          *      exist ? 1 : 0)
572                          */
573                         deptsiz.b.mc = ep->pkt_per_frm;
574                         deptsiz.b.xfersize = ep->xfer_len;
575                         deptsiz.b.pktcnt =
576                             (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
577                         DWC_WRITE_REG32(&core_if->dev_if->
578                                         in_ep_regs[ep->num]->dieptsiz,
579                                         deptsiz.d32);
580
581                         /* Write the DMA register */
582                         DWC_WRITE_REG32(&
583                                         (core_if->dev_if->
584                                          in_ep_regs[ep->num]->diepdma),
585                                         (uint32_t) ep->dma_addr);
586
587                 } else {
588                         deptsiz.b.pktcnt =
589                             (ep->xfer_len + (ep->maxpacket - 1)) /
590                             ep->maxpacket;
591                         deptsiz.b.xfersize = deptsiz.b.pktcnt*ep->maxpacket;
592
593                         DWC_WRITE_REG32(&core_if->dev_if->
594                                         out_ep_regs[ep->num]->doeptsiz,
595                                         deptsiz.d32);
596
597                         /* Write the DMA register */
598                         DWC_WRITE_REG32(&
599                                         (core_if->dev_if->
600                                          out_ep_regs[ep->num]->doepdma),
601                                         (uint32_t) ep->dma_addr);
602
603                 }
604                 /** Enable endpoint, clear nak  */
605                 depctl.d32 = 0;
606                 depctl.b.epena = 1;
607                 depctl.b.cnak = 1;
608
609                 DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
610         }
611 }
612
613 /**
614  * This function does the setup for a data transfer for an EP and
615  * starts the transfer. For an IN transfer, the packets will be
616  * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
617  * the packets are unloaded from the Rx FIFO in the ISR.
618  *
619  * @param core_if Programming view of DWC_otg controller.
620  * @param ep The EP to start the transfer on.
621  */
622
623 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t *core_if,
624                                           dwc_ep_t *ep)
625 {
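        /*
         * Dispatch on the DMA configuration: descriptor DMA uses a
         * descriptor chain, Buffer DMA with the PTI enhancement programs the
         * transfer registers directly, and everything else falls back to
         * packet-at-a-time frame transfers.
         */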
626         if (core_if->dma_enable) {
627                 if (core_if->dma_desc_enable) {
628                         if (ep->is_in) {
629                                 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
630                         } else {
631                                 ep->desc_cnt = ep->pkt_cnt;
632                         }
633                         dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
634                 } else {
635                         if (core_if->pti_enh_enable) {
636                                 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
637                         } else {
638                                 ep->cur_pkt_addr =
639                                     (ep->proc_buf_num) ? ep->
640                                     xfer_buff1 : ep->xfer_buff0;
641                                 ep->cur_pkt_dma_addr =
642                                     (ep->proc_buf_num) ? ep->
643                                     dma_addr1 : ep->dma_addr0;
644                                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
645                         }
646                 }
647         } else {
648                 ep->cur_pkt_addr =
649                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
650                 ep->cur_pkt_dma_addr =
651                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
652                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
653         }
654 }
655
656 /**
657  * This function stops transfer for an EP and
658  * resets the ep's variables.
659  *
660  * @param core_if Programming view of DWC_otg controller.
 * @param ep The EP to stop the transfer on.
662  */
663
664 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t *core_if, dwc_ep_t *ep)
665 {
666         depctl_data_t depctl = {.d32 = 0 };
667         volatile uint32_t *addr;
668
669         if (ep->is_in == 1) {
670                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
671         } else {
672                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
673         }
674
675         /* disable the ep */
676         depctl.d32 = DWC_READ_REG32(addr);
677
678         depctl.b.epdis = 1;
679         depctl.b.snak = 1;
680
681         DWC_WRITE_REG32(addr, depctl.d32);
682
683         if (core_if->dma_desc_enable &&
684             ep->iso_desc_addr && ep->iso_dma_desc_addr) {
685                 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
686                                            ep->iso_dma_desc_addr,
687                                            ep->desc_cnt * 2);
688         }
689
        /* reset variables */
691         ep->dma_addr0 = 0;
692         ep->dma_addr1 = 0;
693         ep->xfer_buff0 = 0;
694         ep->xfer_buff1 = 0;
695         ep->data_per_frame = 0;
696         ep->data_pattern_frame = 0;
697         ep->sync_frame = 0;
698         ep->buf_proc_intrvl = 0;
699         ep->bInterval = 0;
700         ep->proc_buf_num = 0;
        ep->pkt_per_frm = 0;
703         ep->desc_cnt = 0;
704         ep->iso_desc_addr = 0;
705         ep->iso_dma_desc_addr = 0;
706 }
707
708 int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t *pcd, void *ep_handle,
709                              uint8_t *buf0, uint8_t *buf1, dwc_dma_t dma0,
710                              dwc_dma_t dma1, int sync_frame, int dp_frame,
711                              int data_per_frame, int start_frame,
712                              int buf_proc_intrvl, void *req_handle,
713                              int atomic_alloc)
714 {
715         dwc_otg_pcd_ep_t *ep;
716         dwc_irqflags_t flags = 0;
717         dwc_ep_t *dwc_ep;
718         int32_t frm_data;
719         dsts_data_t dsts;
720         dwc_otg_core_if_t *core_if;
721
722         ep = get_ep_from_handle(pcd, ep_handle);
723
724         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
725                 DWC_WARN("bad ep\n");
726                 return -DWC_E_INVALID;
727         }
728
729         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
730         core_if = GET_CORE_IF(pcd);
731         dwc_ep = &ep->dwc_ep;
732
733         if (ep->iso_req_handle) {
734                 DWC_WARN("ISO request in progress\n");
735         }
736
737         dwc_ep->dma_addr0 = dma0;
738         dwc_ep->dma_addr1 = dma1;
739
740         dwc_ep->xfer_buff0 = buf0;
741         dwc_ep->xfer_buff1 = buf1;
742
743         dwc_ep->data_per_frame = data_per_frame;
744
745         /** @todo - pattern data support is to be implemented in the future */
746         dwc_ep->data_pattern_frame = dp_frame;
747         dwc_ep->sync_frame = sync_frame;
748
749         dwc_ep->buf_proc_intrvl = buf_proc_intrvl;
750
751         dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);
752
753         dwc_ep->proc_buf_num = 0;
754
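        /* Packets per (micro)frame: ceil(data_per_frame / maxpacket). */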
755         dwc_ep->pkt_per_frm = 0;
756         frm_data = ep->dwc_ep.data_per_frame;
757         while (frm_data > 0) {
758                 dwc_ep->pkt_per_frm++;
759                 frm_data -= ep->dwc_ep.maxpacket;
760         }
761
762         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
763
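        /*
         * If no start frame was requested, derive one from the current frame
         * number (DSTS.SOFFN) and align it to the endpoint's bInterval.
         */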
764         if (start_frame == -1) {
765                 dwc_ep->next_frame = dsts.b.soffn + 1;
766                 if (dwc_ep->bInterval != 1) {
767                         dwc_ep->next_frame =
768                             dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
769                                                   dwc_ep->next_frame %
770                                                   dwc_ep->bInterval);
771                 }
772         } else {
773                 dwc_ep->next_frame = start_frame;
774         }
775
776         if (!core_if->pti_enh_enable) {
777                 dwc_ep->pkt_cnt =
778                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
779                     dwc_ep->bInterval;
780         } else {
781                 dwc_ep->pkt_cnt =
782                     (dwc_ep->data_per_frame *
783                      (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
784                      - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
785         }
786
787         if (core_if->dma_desc_enable) {
788                 dwc_ep->desc_cnt =
789                     dwc_ep->buf_proc_intrvl*dwc_ep->pkt_per_frm /
790                     dwc_ep->bInterval;
791         }
792
793         if (atomic_alloc) {
794                 dwc_ep->pkt_info =
795                     DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
796         } else {
797                 dwc_ep->pkt_info =
798                     DWC_ALLOC(sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
799         }
800         if (!dwc_ep->pkt_info) {
801                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
802                 return -DWC_E_NO_MEMORY;
803         }
804         if (core_if->pti_enh_enable) {
805                 dwc_memset(dwc_ep->pkt_info, 0,
806                            sizeof(iso_pkt_info_t)*dwc_ep->pkt_cnt);
807         }
808
809         dwc_ep->cur_pkt = 0;
810         ep->iso_req_handle = req_handle;
811
812         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
813         dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
814         return 0;
815 }
816
817 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t *pcd, void *ep_handle,
818                             void *req_handle)
819 {
820         dwc_irqflags_t flags = 0;
821         dwc_otg_pcd_ep_t *ep;
822         dwc_ep_t *dwc_ep;
823
824         ep = get_ep_from_handle(pcd, ep_handle);
825         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
826                 DWC_WARN("bad ep\n");
827                 return -DWC_E_INVALID;
828         }
829         dwc_ep = &ep->dwc_ep;
830
831         dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
832
833         DWC_FREE(dwc_ep->pkt_info);
834         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
835         if (ep->iso_req_handle != req_handle) {
836                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
837                 return -DWC_E_INVALID;
838         }
839
840         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
841
842         ep->iso_req_handle = 0;
843         return 0;
844 }
845
846 /**
 * This function is used for periodic data exchange between the PCD and
 * gadget drivers for Isochronous EPs
849  *
850  *      - Every time a sync period completes this function is called to
851  *        perform data exchange between PCD and gadget
852  */
853 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *ep,
854                              void *req_handle)
855 {
856         int i;
857         dwc_ep_t *dwc_ep;
858
859         dwc_ep = &ep->dwc_ep;
860
861         DWC_SPINUNLOCK(ep->pcd->lock);
862         pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
863                                  dwc_ep->proc_buf_num ^ 0x1);
864         DWC_SPINLOCK(ep->pcd->lock);
865
866         for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
867                 dwc_ep->pkt_info[i].status = 0;
868                 dwc_ep->pkt_info[i].offset = 0;
869                 dwc_ep->pkt_info[i].length = 0;
870         }
871 }
872
873 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t *pcd, void *ep_handle,
874                                      void *iso_req_handle)
875 {
876         dwc_otg_pcd_ep_t *ep;
877         dwc_ep_t *dwc_ep;
878
879         ep = get_ep_from_handle(pcd, ep_handle);
        if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
881                 DWC_WARN("bad ep\n");
882                 return -DWC_E_INVALID;
883         }
884         dwc_ep = &ep->dwc_ep;
885
886         return dwc_ep->pkt_cnt;
887 }
888
889 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t *pcd, void *ep_handle,
890                                        void *iso_req_handle, int packet,
891                                        int *status, int *actual, int *offset)
892 {
893         dwc_otg_pcd_ep_t *ep;
894         dwc_ep_t *dwc_ep;
895
896         ep = get_ep_from_handle(pcd, ep_handle);
        if (!ep) {
                DWC_WARN("bad ep\n");
                return;
        }

900         dwc_ep = &ep->dwc_ep;
901
902         *status = dwc_ep->pkt_info[packet].status;
903         *actual = dwc_ep->pkt_info[packet].length;
904         *offset = dwc_ep->pkt_info[packet].offset;
905 }
906
907 #endif /* DWC_EN_ISOC */
908
909 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t *pcd, dwc_otg_pcd_ep_t *pcd_ep,
910                                 uint32_t is_in, uint32_t ep_num)
911 {
912         /* Init EP structure */
913         pcd_ep->desc = 0;
914         pcd_ep->pcd = pcd;
915         pcd_ep->stopped = 1;
916         pcd_ep->queue_sof = 0;
917
918         /* Init DWC ep structure */
919         pcd_ep->dwc_ep.is_in = is_in;
920         pcd_ep->dwc_ep.num = ep_num;
921         pcd_ep->dwc_ep.active = 0;
922         pcd_ep->dwc_ep.tx_fifo_num = 0;
        /* Control until ep is activated */
924         pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
925         pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
926         pcd_ep->dwc_ep.dma_addr = 0;
927         pcd_ep->dwc_ep.start_xfer_buff = 0;
928         pcd_ep->dwc_ep.xfer_buff = 0;
929         pcd_ep->dwc_ep.xfer_len = 0;
930         pcd_ep->dwc_ep.xfer_count = 0;
931         pcd_ep->dwc_ep.sent_zlp = 0;
932         pcd_ep->dwc_ep.total_len = 0;
933         pcd_ep->dwc_ep.desc_addr = 0;
934         pcd_ep->dwc_ep.dma_desc_addr = 0;
935         DWC_CIRCLEQ_INIT(&pcd_ep->queue);
936 }
937
938 /**
 * Initialize EPs
940  */
941 static void dwc_otg_pcd_reinit(dwc_otg_pcd_t *pcd)
942 {
943         int i;
944         uint32_t hwcfg1;
945         dwc_otg_pcd_ep_t *ep;
946         int in_ep_cntr, out_ep_cntr;
947         uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
948         uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
949         /**
950          * Initialize the EP0 structure.
951          */
952         ep = &pcd->ep0;
953         dwc_otg_pcd_init_ep(pcd, ep, 0, 0);
954
955         in_ep_cntr = 0;
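        /*
         * GHWCFG1 encodes the endpoint directions two bits per EP; after the
         * shifts below, a cleared low bit means the endpoint supports the
         * direction being scanned (IN here, OUT in the next loop).
         */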
956         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
957         for (i = 1; in_ep_cntr < num_in_eps; i++) {
958                 if ((hwcfg1 & 0x1) == 0) {
959                         dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
960                         in_ep_cntr++;
961                         /**
962                          * @todo NGS: Add direction to EP, based on contents
963                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
965                          */
966                         dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);
967
968                         DWC_CIRCLEQ_INIT(&ep->queue);
969                 }
970                 hwcfg1 >>= 2;
971         }
972
973         out_ep_cntr = 0;
974         hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
975         for (i = 1; out_ep_cntr < num_out_eps; i++) {
976                 if ((hwcfg1 & 0x1) == 0) {
977                         dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
978                         out_ep_cntr++;
979                         /**
980                          * @todo NGS: Add direction to EP, based on contents
981                          * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
983                          */
984                         dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
985                         DWC_CIRCLEQ_INIT(&ep->queue);
986                 }
987                 hwcfg1 >>= 2;
988         }
989
990         pcd->ep0state = EP0_DISCONNECT;
991         pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
992         pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
993 }
994
995 /**
996  * This function is called when the SRP timer expires. The SRP should
997  * complete within 6 seconds.
998  */
999 static void srp_timeout(void *ptr)
1000 {
1001         gotgctl_data_t gotgctl;
1002         dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
1003         volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;
1004
1005         gotgctl.d32 = DWC_READ_REG32(addr);
1006
1007         core_if->srp_timer_started = 0;
1008
1009         if (core_if->adp_enable) {
1010                 if (gotgctl.b.bsesvld == 0) {
1011                         gpwrdn_data_t gpwrdn = {.d32 = 0 };
1012                         DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
1013                         /* Power off the core */
1014                         if (core_if->power_down == 2) {
1015                                 gpwrdn.b.pwrdnswtch = 1;
1016                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
1017                                                  gpwrdn, gpwrdn.d32, 0);
1018                         }
1019
1020                         gpwrdn.d32 = 0;
1021                         gpwrdn.b.pmuintsel = 1;
1022                         gpwrdn.b.pmuactv = 1;
1023                         DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
1024                                          gpwrdn.d32);
1025                         dwc_otg_adp_probe_start(core_if);
1026                 } else {
1027                         DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
1028                         core_if->op_state = B_PERIPHERAL;
1029                         dwc_otg_core_init(core_if);
1030                         dwc_otg_enable_global_interrupts(core_if);
1031                         cil_pcd_start(core_if);
1032                 }
1033         }
1034
1035         if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
1036             (core_if->core_params->i2c_enable)) {
1037                 DWC_PRINTF("SRP Timeout\n");
1038
1039                 if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
1040                         if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
1041                                 core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->
1042                                                                p);
1043                         }
1044
1045                         /* Clear Session Request */
1046                         gotgctl.d32 = 0;
1047                         gotgctl.b.sesreq = 1;
1048                         DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
1049                                          gotgctl.d32, 0);
1050
1051                         core_if->srp_success = 0;
1052                 } else {
1053                         __DWC_ERROR("Device not connected/responding\n");
1054                         gotgctl.b.sesreq = 0;
1055                         DWC_WRITE_REG32(addr, gotgctl.d32);
1056                 }
1057         } else if (gotgctl.b.sesreq) {
1058                 DWC_PRINTF("SRP Timeout\n");
1059
1060                 __DWC_ERROR("Device not connected/responding\n");
1061                 gotgctl.b.sesreq = 0;
1062                 DWC_WRITE_REG32(addr, gotgctl.d32);
1063         } else {
1064                 DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
1065         }
1066 }
1067
1068 /**
1069  * Tasklet
1070  *
1071  */
1072 extern void start_next_request(dwc_otg_pcd_ep_t *ep);
1073
1074 static void start_xfer_tasklet_func(void *data)
1075 {
1076         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1077         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1078
1079         int i;
1080         depctl_data_t diepctl;
1081
1082         DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1083
1084         diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1085
1086         if (pcd->ep0.queue_sof) {
1087                 pcd->ep0.queue_sof = 0;
1088                 start_next_request(&pcd->ep0);
1089                 /* break; */
1090         }
1091
1092         for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1093                 depctl_data_t diepctl;
1094                 diepctl.d32 =
1095                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1096
1097                 if (pcd->in_ep[i].queue_sof) {
1098                         pcd->in_ep[i].queue_sof = 0;
1099                         start_next_request(&pcd->in_ep[i]);
1100                         /* break; */
1101                 }
1102         }
1103
1104         return;
1105 }
1106
1107 /**
1108  * This function initialized the PCD portion of the driver.
 * This function initializes the PCD portion of the driver.
1110  */
1111 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t *core_if)
1112 {
1113         dwc_otg_pcd_t *pcd = NULL;
1114         dwc_otg_dev_if_t *dev_if;
1115         int i;
1116
1117         /*
1118          * Allocate PCD structure
1119          */
1120         pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1121
1122         if (pcd == NULL) {
1123                 return NULL;
1124         }
1125
1126         pcd->lock = DWC_SPINLOCK_ALLOC();
1127         if (!pcd->lock) {
1128                 DWC_ERROR("Could not allocate lock for pcd");
1129                 DWC_FREE(pcd);
1130                 return NULL;
1131         }
        /* Set core_if's lock pointer to pcd->lock */
1133         core_if->lock = pcd->lock;
1134         pcd->core_if = core_if;
1135
1136         dev_if = core_if->dev_if;
1137         dev_if->isoc_ep = NULL;
1138
1139         if (core_if->hwcfg4.b.ded_fifo_en) {
1140                 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1141         } else {
1142                 DWC_PRINTF("Shared Tx FIFO mode\n");
1143         }
1144
1145         /*
         * Initialize the Core for Device mode here if there is no ADP support.
1147          * Otherwise it will be done later in dwc_otg_adp_start routine.
1148          */
1149         /* if (dwc_otg_is_device_mode(core_if) ) { */
1150         /*      dwc_otg_core_dev_init(core_if); */
1151         /*} */
1152
1153         /*
1154          * Register the PCD Callbacks.
1155          */
1156         dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1157
1158         /*
1159          * Initialize the DMA buffer for SETUP packets
1160          */
1161         if (GET_CORE_IF(pcd)->dma_enable) {
1162                 pcd->setup_pkt =
1163                     DWC_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
1164                                          &pcd->setup_pkt_dma_handle);
1165                 if (pcd->setup_pkt == NULL) {
1166                         DWC_FREE(pcd);
1167                         return NULL;
1168                 }
1169
1170                 pcd->status_buf =
1171                     DWC_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
1172                                          &pcd->status_buf_dma_handle);
1173                 if (pcd->status_buf == NULL) {
1174                         DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1175                                      pcd->setup_pkt, pcd->setup_pkt_dma_handle);
1176                         DWC_FREE(pcd);
1177                         return NULL;
1178                 }
1179
1180                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1181                         dev_if->setup_desc_addr[0] =
1182                             dwc_otg_ep_alloc_desc_chain
1183                             (&dev_if->dma_setup_desc_addr[0], 1);
1184                         dev_if->setup_desc_addr[1] =
1185                             dwc_otg_ep_alloc_desc_chain
1186                             (&dev_if->dma_setup_desc_addr[1], 1);
1187                         dev_if->in_desc_addr =
1188                             dwc_otg_ep_alloc_desc_chain
1189                             (&dev_if->dma_in_desc_addr, 1);
1190                         dev_if->out_desc_addr =
1191                             dwc_otg_ep_alloc_desc_chain
1192                             (&dev_if->dma_out_desc_addr, 1);
1193                         pcd->data_terminated = 0;
1194
1195                         if (dev_if->setup_desc_addr[0] == 0
1196                             || dev_if->setup_desc_addr[1] == 0
1197                             || dev_if->in_desc_addr == 0
1198                             || dev_if->out_desc_addr == 0) {
1199
1200                                 if (dev_if->out_desc_addr)
1201                                         dwc_otg_ep_free_desc_chain
1202                                             (dev_if->out_desc_addr,
1203                                              dev_if->dma_out_desc_addr, 1);
1204                                 if (dev_if->in_desc_addr)
1205                                         dwc_otg_ep_free_desc_chain
1206                                             (dev_if->in_desc_addr,
1207                                              dev_if->dma_in_desc_addr, 1);
1208                                 if (dev_if->setup_desc_addr[1])
1209                                         dwc_otg_ep_free_desc_chain
1210                                             (dev_if->setup_desc_addr[1],
1211                                              dev_if->dma_setup_desc_addr[1], 1);
1212                                 if (dev_if->setup_desc_addr[0])
1213                                         dwc_otg_ep_free_desc_chain
1214                                             (dev_if->setup_desc_addr[0],
1215                                              dev_if->dma_setup_desc_addr[0], 1);
1216
1217                                 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1218                                              pcd->setup_pkt,
1219                                              pcd->setup_pkt_dma_handle);
1220                                 DWC_DMA_FREE(sizeof(*pcd->status_buf),
1221                                              pcd->status_buf,
1222                                              pcd->status_buf_dma_handle);
1223
1224                                 DWC_FREE(pcd);
1225
1226                                 return NULL;
1227                         }
1228                 }
1229         } else {
1230                 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1231                 if (pcd->setup_pkt == NULL) {
1232                         DWC_FREE(pcd);
1233                         return NULL;
1234                 }
1235
1236                 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1237                 if (pcd->status_buf == NULL) {
1238                         DWC_FREE(pcd->setup_pkt);
1239                         DWC_FREE(pcd);
1240                         return NULL;
1241                 }
1242         }
1243
1244         dwc_otg_pcd_reinit(pcd);
1245
1246         /* Allocate the cfi object for the PCD */
1247 #ifdef DWC_UTE_CFI
1248         pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1249         if (NULL == pcd->cfi)
1250                 goto fail;
1251         if (init_cfi(pcd->cfi)) {
1252                 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1253                 goto fail;
1254         }
1255 #endif
1256
1257         /* Initialize tasklets */
1258         pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1259                                                  start_xfer_tasklet_func, pcd);
1260         pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1261                                                 do_test_mode, pcd);
1262
1263         /* Initialize SRP timer */
1264         core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1265
1266         if (core_if->core_params->dev_out_nak) {
                /*
                 * Initialize xfer timeout timer. Implemented for the
                 * 2.93a feature "Device DDMA OUT NAK Enhancement".
                 */
1271                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1272                         pcd->core_if->ep_xfer_timer[i] =
1273                             DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1274                                             &pcd->core_if->ep_xfer_info[i]);
1275                 }
1276         }
1277
1278         return pcd;
1279 #ifdef DWC_UTE_CFI
1280 fail:
1281 #endif
1282         if (pcd->setup_pkt)
1283                 DWC_FREE(pcd->setup_pkt);
1284         if (pcd->status_buf)
1285                 DWC_FREE(pcd->status_buf);
1286 #ifdef DWC_UTE_CFI
1287         if (pcd->cfi)
1288                 DWC_FREE(pcd->cfi);
1289 #endif
1290         if (pcd)
1291                 DWC_FREE(pcd);
1292         return NULL;
1293
1294 }
1295
1296 /**
1297  * Remove PCD specific data
1298  */
1299 void dwc_otg_pcd_remove(dwc_otg_pcd_t *pcd)
1300 {
1301         dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
1302         int i;
1303         if (pcd->core_if->core_params->dev_out_nak) {
1304                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1305                         DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
1306                         pcd->core_if->ep_xfer_info[i].state = 0;
1307                 }
1308         }
1309
1310         if (GET_CORE_IF(pcd)->dma_enable) {
1311                 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
1312                              pcd->setup_pkt_dma_handle);
1313                 DWC_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
1314                              pcd->status_buf_dma_handle);
1315                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1316                         dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
1317                                                    dev_if->dma_setup_desc_addr
1318                                                    [0], 1);
1319                         dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
1320                                                    dev_if->dma_setup_desc_addr
1321                                                    [1], 1);
1322                         dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
1323                                                    dev_if->dma_in_desc_addr, 1);
1324                         dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
1325                                                    dev_if->dma_out_desc_addr,
1326                                                    1);
1327                 }
1328         } else {
1329                 DWC_FREE(pcd->setup_pkt);
1330                 DWC_FREE(pcd->status_buf);
1331         }
1332         DWC_SPINLOCK_FREE(pcd->lock);
1333         /* Set core_if's lock pointer to NULL */
1334         pcd->core_if->lock = NULL;
1335
1336         DWC_TASK_FREE(pcd->start_xfer_tasklet);
1337         DWC_TASK_FREE(pcd->test_mode_tasklet);
1338         if (pcd->core_if->core_params->dev_out_nak) {
1339                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1340                         if (pcd->core_if->ep_xfer_timer[i]) {
1341                                 DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
1342                         }
1343                 }
1344         }
1345
1346 /* Release the CFI object's dynamic memory */
1347 #ifdef DWC_UTE_CFI
1348         if (pcd->cfi->ops.release) {
1349                 pcd->cfi->ops.release(pcd->cfi);
1350         }
1351 #endif
1352
1353         DWC_FREE(pcd);
1354 }
1355
1356 /**
1357  * Returns whether registered pcd is dual speed or not
1358  */
1359 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t *pcd)
1360 {
1361         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1362
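        /*
         * Not dual speed if the core is configured for full speed only, or
         * if the PHY configuration together with the ulpi_fs_ls parameter
         * restricts the core to FS/LS operation.
         */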
1363         if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1364             ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1365              (core_if->hwcfg2.b.fs_phy_type == 1) &&
1366              (core_if->core_params->ulpi_fs_ls))) {
1367                 return 0;
1368         }
1369
1370         return 1;
1371 }
1372
1373 /**
1374  * Returns whether registered pcd is OTG capable or not
1375  */
1376 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t *pcd)
1377 {
1378         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1379         gusbcfg_data_t usbcfg = {.d32 = 0 };
1380         uint32_t retval = 0;
1381
1382         usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
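        /*
         * On kernels >= 3.6 the return value is a capability bitmap:
         * bit 0 = SRP capable, bit 1 = HNP capable, bit 2 = ADP enabled.
         * Older kernels only get a 0/1 answer requiring both SRP and HNP.
         */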
1383 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0)
1384         if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
1385                 return 0;
1386         else
1387                 return 1;
#else
1389         if (!usbcfg.b.srpcap)
1390                 return 0;
1391         else
1392                 retval |= 1;
1393
1394         if (usbcfg.b.hnpcap)
1395                 retval |= 2;
1396
1397         if (core_if->adp_enable)
1398                 retval |= 4;
1399 #endif
1400
1401         return retval;
1402 }
1403
/**
 * This function assigns a Tx FIFO to an EP
 * in dedicated (multiple Tx FIFO) mode
 */
1408 static uint32_t assign_tx_fifo(dwc_otg_core_if_t *core_if)
1409 {
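        /*
         * Scan tx_msk for the first free Tx FIFO, mark it as in use and
         * return its 1-based number; 0 means no FIFO is available.
         */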
1410         uint32_t TxMsk = 1;
1411         int i;
1412
1413         for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1414                 if ((TxMsk & core_if->tx_msk) == 0) {
1415                         core_if->tx_msk |= TxMsk;
1416                         return i + 1;
1417                 }
1418                 TxMsk <<= 1;
1419         }
1420         return 0;
1421 }
1422
1423 /**
1424  * This function assigns a periodic Tx FIFO to a periodic EP
1425  * in shared Tx FIFO mode
1426  */
1427 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t *core_if)
1428 {
1429         uint32_t PerTxMsk = 1;
1430         int i;
1431         for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1432                 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1433                         core_if->p_tx_msk |= PerTxMsk;
1434                         return i + 1;
1435                 }
1436                 PerTxMsk <<= 1;
1437         }
1438         return 0;
1439 }
1440
1441 /**
1442  * This function releases a periodic Tx FIFO
1443  * in shared Tx FIFO mode
1444  */
1445 static void release_perio_tx_fifo(dwc_otg_core_if_t *core_if,
1446                                   uint32_t fifo_num)
1447 {
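        /*
         * (msk & bit) ^ msk clears the selected bit and leaves the others
         * untouched, i.e. it is equivalent to msk &= ~(1 << (fifo_num - 1)).
         * The same idiom is used in release_tx_fifo() below.
         */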
1448         core_if->p_tx_msk =
1449             (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1450 }
1451
1452 /**
1453  * This function releases a Tx FIFO
1454  * in dedicated (multiple) Tx FIFO mode
1455  */
1456 static void release_tx_fifo(dwc_otg_core_if_t *core_if, uint32_t fifo_num)
1457 {
1458         core_if->tx_msk =
1459             (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1460 }
1461
1462 /**
1463  * This function is being called from gadget
1464  * to enable PCD endpoint.
1465  */
1466 int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t *pcd,
1467                           const uint8_t *ep_desc, void *usb_ep)
1468 {
1469         int num, dir;
1470         dwc_otg_pcd_ep_t *ep = NULL;
1471         const usb_endpoint_descriptor_t *desc;
1472         dwc_irqflags_t flags;
1473         /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1474         /* gdfifocfg_data_t gdfifocfg = {.d32 = 0 }; */
1475         /* gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 }; */
1476         int retval = 0;
1477         int i, epcount;
1478
1479         desc = (const usb_endpoint_descriptor_t *)ep_desc;
1480
1481         if (!desc) {
1482                 pcd->ep0.priv = usb_ep;
1483                 ep = &pcd->ep0;
1484                 retval = -DWC_E_INVALID;
1485                 goto out;
1486         }
1487
1488         num = UE_GET_ADDR(desc->bEndpointAddress);
1489         dir = UE_GET_DIR(desc->bEndpointAddress);
1490
1491         if (!desc->wMaxPacketSize) {
1492                 DWC_WARN("bad maxpacketsize\n");
1493                 retval = -DWC_E_INVALID;
1494                 goto out;
1495         }
1496
1497         if (dir == UE_DIR_IN) {
1498                 epcount = pcd->core_if->dev_if->num_in_eps;
1499                 for (i = 0; i < epcount; i++) {
1500                         if (num == pcd->in_ep[i].dwc_ep.num) {
1501                                 ep = &pcd->in_ep[i];
1502                                 break;
1503                         }
1504                 }
1505         } else {
1506                 epcount = pcd->core_if->dev_if->num_out_eps;
1507                 for (i = 0; i < epcount; i++) {
1508                         if (num == pcd->out_ep[i].dwc_ep.num) {
1509                                 ep = &pcd->out_ep[i];
1510                                 break;
1511                         }
1512                 }
1513         }
1514
1515         if (!ep) {
1516                 DWC_WARN("bad address\n");
1517                 retval = -DWC_E_INVALID;
1518                 goto out;
1519         }
1520
1521         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1522
1523         ep->desc = desc;
1524         ep->priv = usb_ep;
1525
1526         /*
1527          * Activate the EP
1528          */
1529         ep->stopped = 0;
1530
1531         ep->dwc_ep.is_in = (dir == UE_DIR_IN);
1532         ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);
1533
1534         ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;
1535
1536         if (ep->dwc_ep.is_in) {
1537                 if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1538                         ep->dwc_ep.tx_fifo_num = 0;
1539
1540                         if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
1541                                 /*
1542                                  * if ISOC EP then assign a Periodic Tx FIFO.
1543                                  */
1544                                 ep->dwc_ep.tx_fifo_num =
1545                                     assign_perio_tx_fifo(GET_CORE_IF(pcd));
1546                         }
1547                 } else {
1548                         /*
1549                          * if Dedicated FIFOs mode is on then assign a Tx FIFO.
1550                          */
1551                         ep->dwc_ep.tx_fifo_num =
1552                             assign_tx_fifo(GET_CORE_IF(pcd));
1553                 }
1554
1555                 /* Calculating EP info controller base address */
1556 #if 0
1557                 if (ep->dwc_ep.tx_fifo_num
1558                     && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1559                         gdfifocfg.d32 =
1560                             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
1561                                            gdfifocfg);
1562                         gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1563                         dptxfsiz.d32 =
1564                             (DWC_READ_REG32
1565                              (&GET_CORE_IF(pcd)->
1566                               core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num -
1567                                                         1]) >> 16);
1568                         gdfifocfg.b.epinfobase =
1569                             gdfifocfgbase.d32 + dptxfsiz.d32;
1570                         if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1571                                 DWC_WRITE_REG32(&GET_CORE_IF
1572                                                 (pcd)->core_global_regs->
1573                                                 gdfifocfg, gdfifocfg.d32);
1574                         }
1575                 }
1576 #endif
1577         }
1578         /* Set initial data PID. */
1579         if (ep->dwc_ep.type == UE_BULK) {
1580                 ep->dwc_ep.data_pid_start = 0;
1581         }
1582
1583         /* Alloc DMA Descriptors */
1584         if (GET_CORE_IF(pcd)->dma_desc_enable) {
1585 #ifndef DWC_UTE_PER_IO
1586                 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1587 #endif
1588                         ep->dwc_ep.desc_addr =
1589                             dwc_otg_ep_alloc_desc_chain(&ep->dwc_ep.
1590                                                         dma_desc_addr,
1591                                                         MAX_DMA_DESC_CNT);
1592                         if (!ep->dwc_ep.desc_addr) {
1593                                 DWC_WARN("%s, can't allocate DMA descriptor\n",
1594                                          __func__);
1595                                 retval = -DWC_E_SHUTDOWN;
1596                                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1597                                 goto out;
1598                         }
1599 #ifndef DWC_UTE_PER_IO
1600                 }
1601 #endif
1602         }
1603
1604         DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
1605                     (ep->dwc_ep.is_in ? "IN" : "OUT"),
1606                     ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
1607 #ifdef DWC_UTE_PER_IO
1608         ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
1609 #endif
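        /*
         * For isochronous endpoints the polling interval in (micro)frames is
         * 2^(bInterval - 1), per the USB 2.0 endpoint descriptor encoding.
         * frame_num starts at 0xFFFFFFFF, apparently as a "no start frame
         * scheduled yet" marker.
         */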
1610         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
1611                 ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
1612                 ep->dwc_ep.frame_num = 0xFFFFFFFF;
1613         }
1614
1615         dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);
1616
1617 #ifdef DWC_UTE_CFI
1618         if (pcd->cfi->ops.ep_enable) {
1619                 pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
1620         }
1621 #endif
1622
1623         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1624
1625 out:
1626         return retval;
1627 }
1628
1629 /**
1630  * This function is being called from gadget
1631  * to disable PCD endpoint.
1632  */
1633 int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t *pcd, void *ep_handle)
1634 {
1635         dwc_otg_pcd_ep_t *ep;
1636         dwc_irqflags_t flags;
1637         dwc_otg_dev_dma_desc_t *desc_addr;
1638         dwc_dma_t dma_desc_addr;
1639         gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
1640         gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
1641         /* fifosize_data_t dptxfsiz = {.d32 = 0 }; */
1642
1643         ep = get_ep_from_handle(pcd, ep_handle);
1644
1645         if (!ep || !ep->desc) {
1646                 DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
1647                 return -DWC_E_INVALID;
1648         }
1649
1650         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
1651
1652         dwc_otg_request_nuke(ep);
1653
1654         dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
1655         if (pcd->core_if->core_params->dev_out_nak) {
1656                 DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
1657                 pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
1658         }
1659         ep->desc = NULL;
1660         ep->stopped = 1;
1661
1662         gdfifocfg.d32 =
1663             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
1664         gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
1665
1666         if (ep->dwc_ep.is_in) {
1667                 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1668                         /* Flush the Tx FIFO */
1669                         dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
1670                                               ep->dwc_ep.tx_fifo_num);
1671                 }
1672                 release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1673                 release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
1674 #if 0
1675                 if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
1676                         /* Decreasing EPinfo Base Addr */
1677                         dptxfsiz.d32 =
1678                             (DWC_READ_REG32
1679                              (&GET_CORE_IF(pcd)->core_global_regs->
1680                               dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
1681                         gdfifocfg.b.epinfobase =
1682                             gdfifocfgbase.d32 - dptxfsiz.d32;
1683                         if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
1684                                 DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1685                                                 core_global_regs->gdfifocfg,
1686                                                 gdfifocfg.d32);
1687                         }
1688                 }
1689 #endif
1690         }
1691
1692         /* Free DMA Descriptors */
1693         if (GET_CORE_IF(pcd)->dma_desc_enable) {
1694                 if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
1695                         desc_addr = ep->dwc_ep.desc_addr;
1696                         dma_desc_addr = ep->dwc_ep.dma_desc_addr;
1697
1698                         /* Cannot call dma_free_coherent() with IRQs disabled */
1699                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1700                         dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
1701                                                    MAX_DMA_DESC_CNT);
1702
1703                         goto out_unlocked;
1704                 }
1705         }
1706         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
1707
1708 out_unlocked:
1709         DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
1710                     ep->dwc_ep.is_in ? "IN" : "OUT");
1711         return 0;
1712
1713 }
1714
1715 /******************************************************************************/
1716 #ifdef DWC_UTE_PER_IO
1717
1718 /**
1719  * Free the request and its extended parts
1720  *
1721  */
1722 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t *ep, dwc_otg_pcd_request_t *req)
1723 {
1724         DWC_FREE(req->ext_req.per_io_frame_descs);
1725         DWC_FREE(req);
1726 }
1727
1728 /**
1729  * Start the next request in the endpoint's queue.
1730  *
1731  */
1732 int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t *pcd,
1733                                         dwc_otg_pcd_ep_t *ep)
1734 {
1735         int i;
1736         dwc_otg_pcd_request_t *req = NULL;
1737         dwc_ep_t *dwcep = NULL;
1738         struct dwc_iso_xreq_port *ereq = NULL;
1739         struct dwc_iso_pkt_desc_port *ddesc_iso;
1740         uint16_t nat;
1741         depctl_data_t diepctl;
1742
1743         dwcep = &ep->dwc_ep;
1744
1745         if (dwcep->xiso_active_xfers > 0) {
1746 #if 0
1747                 /* Disable this to decrease s/w overhead
1748                  * that is crucial for Isoc transfers */
1749                 DWC_WARN("There are currently active transfers for EP%d \
1750                         (active=%d; queued=%d)", dwcep->num,
1751                         dwcep->xiso_active_xfers, dwcep->xiso_queued_xfers);
1752 #endif
1753                 return 0;
1754         }
1755
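        /*
         * Bits 12:11 of wMaxPacketSize encode the number of additional
         * transactions per microframe for high-bandwidth endpoints (USB 2.0
         * section 9.6.6); the value is used below to program the PID field
         * of the ISO DMA descriptors.
         */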
1756         nat = UGETW(ep->desc->wMaxPacketSize);
1757         nat = (nat >> 11) & 0x03;
1758
1759         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1760                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1761                 ereq = &req->ext_req;
1762                 ep->stopped = 0;
1763
1764                 /* Get the frame number */
1765                 dwcep->xiso_frame_num =
1766                     dwc_otg_get_frame_number(GET_CORE_IF(pcd));
1767                 DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);
1768
1769                 ddesc_iso = ereq->per_io_frame_descs;
1770
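                /*
                 * Each DMA descriptor is stamped with a target frame number;
                 * the counter advances by xiso_bInterval per packet and wraps
                 * at 14 bits (0x3FFF), matching the width of the core's frame
                 * counter.
                 */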
1771                 if (dwcep->is_in) {
1772                         /* Setup DMA Descriptor chain for IN Isoc request */
1773                         for (i = 0; i < ereq->pio_pkt_count; i++) {
1774                                 /* if ((i % (nat + 1)) == 0) */
1775                                 if (i > 0)
1776                                         dwcep->xiso_frame_num =
1777                                             (dwcep->xiso_bInterval +
1778                                              dwcep->xiso_frame_num) & 0x3FFF;
1779                                 dwcep->desc_addr[i].buf =
1780                                     req->dma + ddesc_iso[i].offset;
1781                                 dwcep->desc_addr[i].status.b_iso_in.txbytes =
1782                                     ddesc_iso[i].length;
1783                                 dwcep->desc_addr[i].status.b_iso_in.framenum =
1784                                     dwcep->xiso_frame_num;
1785                                 dwcep->desc_addr[i].status.b_iso_in.bs =
1786                                     BS_HOST_READY;
1787                                 dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
1788                                 dwcep->desc_addr[i].status.b_iso_in.sp =
1789                                     (ddesc_iso[i].length %
1790                                      dwcep->maxpacket) ? 1 : 0;
1791                                 dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
1792                                 dwcep->desc_addr[i].status.b_iso_in.pid =
1793                                     nat + 1;
1794                                 dwcep->desc_addr[i].status.b_iso_in.l = 0;
1795
1796                                 /* Process the last descriptor */
1797                                 if (i == ereq->pio_pkt_count - 1) {
1798                                         dwcep->desc_addr[i].status.b_iso_in.
1799                                             ioc = 1;
1800                                         dwcep->desc_addr[i].status.b_iso_in.l =
1801                                             1;
1802                                 }
1803                         }
1804
1805                         /* Setup and start the transfer for this endpoint */
1806                         dwcep->xiso_active_xfers++;
1807                         DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
1808                                         dev_if->in_ep_regs[dwcep->num]->diepdma,
1809                                         dwcep->dma_desc_addr);
1810                         diepctl.d32 = 0;
1811                         diepctl.b.epena = 1;
1812                         diepctl.b.cnak = 1;
1813                         DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
1814                                          dev_if->in_ep_regs[dwcep->num]->
1815                                          diepctl, 0, diepctl.d32);
1816                 } else {
1817                         /* Setup DMA Descriptor chain for OUT Isoc request */
1818                         for (i = 0; i < ereq->pio_pkt_count; i++) {
1819                                 /* if ((i % (nat + 1)) == 0) */
1820                                 dwcep->xiso_frame_num = (dwcep->xiso_bInterval +
1821                                                          dwcep->
1822                                                          xiso_frame_num) &
1823                                     0x3FFF;
1824                                 dwcep->desc_addr[i].buf =
1825                                     req->dma + ddesc_iso[i].offset;
1826                                 dwcep->desc_addr[i].status.b_iso_out.rxbytes =
1827                                     ddesc_iso[i].length;
1828                                 dwcep->desc_addr[i].status.b_iso_out.framenum =
1829                                     dwcep->xiso_frame_num;
1830                                 dwcep->desc_addr[i].status.b_iso_out.bs =
1831                                     BS_HOST_READY;
1832                                 dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
1833                                 dwcep->desc_addr[i].status.b_iso_out.sp =
1834                                     (ddesc_iso[i].length %
1835                                      dwcep->maxpacket) ? 1 : 0;
1836                                 dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
1837                                 dwcep->desc_addr[i].status.b_iso_out.pid =
1838                                     nat + 1;
1839                                 dwcep->desc_addr[i].status.b_iso_out.l = 0;
1840
1841                                 /* Process the last descriptor */
1842                                 if (i == ereq->pio_pkt_count - 1) {
1843                                         dwcep->desc_addr[i].status.b_iso_out.
1844                                             ioc = 1;
1845                                         dwcep->desc_addr[i].status.b_iso_out.l =
1846                                             1;
1847                                 }
1848                         }
1849
1850                         /* Setup and start the transfer for this endpoint */
1851                         dwcep->xiso_active_xfers++;
1852                         DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
1853                                         out_ep_regs[dwcep->num]->doepdma,
1854                                         dwcep->dma_desc_addr);
1855                         diepctl.d32 = 0;
1856                         diepctl.b.epena = 1;
1857                         diepctl.b.cnak = 1;
1858                         DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
1859                                          out_ep_regs[dwcep->num]->doepctl, 0,
1860                                          diepctl.d32);
1861                 }
1862
1863         } else {
1864                 ep->stopped = 1;
1865         }
1866
1867         return 0;
1868 }
1869
1870 /**
1871  * Complete the current ISOC request and remove it from the queue.
1872  */
1873 void complete_xiso_ep(dwc_otg_pcd_ep_t *ep)
1874 {
1875         dwc_otg_pcd_request_t *req = NULL;
1876         struct dwc_iso_xreq_port *ereq = NULL;
1877         struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
1878         dwc_ep_t *dwcep = NULL;
1879         int i;
1880
1881         /* DWC_DEBUG(); */
1882         dwcep = &ep->dwc_ep;
1883
1884         /* Get the first pending request from the queue */
1885         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
1886                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
1887                 if (!req) {
1888                         DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
1889                         return;
1890                 }
1891                 dwcep->xiso_active_xfers--;
1892                 dwcep->xiso_queued_xfers--;
1893                 /* Remove this request from the queue */
1894                 DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
1895         } else {
1896                 DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
1897                 return;
1898         }
1899
1900         ep->stopped = 1;
1901         ereq = &req->ext_req;
1902         ddesc_iso = ereq->per_io_frame_descs;
1903
1904         if (dwcep->xiso_active_xfers < 0) {
1905                 DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
1906                          dwcep->xiso_active_xfers);
1907         }
1908
1909         /* Fill the Isoc descs of portable extended req from dma descriptors */
1910         for (i = 0; i < ereq->pio_pkt_count; i++) {
1911                 if (dwcep->is_in) {     /* IN endpoints */
1912                         ddesc_iso[i].actual_length = ddesc_iso[i].length -
1913                             dwcep->desc_addr[i].status.b_iso_in.txbytes;
1914                         ddesc_iso[i].status =
1915                             dwcep->desc_addr[i].status.b_iso_in.txsts;
1916                 } else {        /* OUT endpoints */
1917                         ddesc_iso[i].actual_length = ddesc_iso[i].length -
1918                             dwcep->desc_addr[i].status.b_iso_out.rxbytes;
1919                         ddesc_iso[i].status =
1920                             dwcep->desc_addr[i].status.b_iso_out.rxsts;
1921                 }
1922         }
1923
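        /*
         * Drop the PCD lock around the completion callback; the non-portable
         * completion handler may call back into the PCD (for example to
         * queue the next request).
         */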
1924         DWC_SPINUNLOCK(ep->pcd->lock);
1925
1926         /* Call the completion function in the non-portable logic */
1927         ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
1928                                       &req->ext_req);
1929
1930         DWC_SPINLOCK(ep->pcd->lock);
1931
1932         /* Free the request - specific freeing needed for extended request object */
1933         dwc_pcd_xiso_ereq_free(ep, req);
1934
1935         /* Start the next request */
1936         dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);
1937
1938         return;
1939 }
1940
1941 /**
1942  * Create and initialize the Isoc pkt descriptors of the extended request.
1943  *
1944  */
1945 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t *req,
1946                                              void *ereq_nonport,
1947                                              int atomic_alloc)
1948 {
1949         struct dwc_iso_xreq_port *ereq = NULL;
1950         struct dwc_iso_xreq_port *req_mapped = NULL;
1951         struct dwc_iso_pkt_desc_port *ipds = NULL;      /* To be created in this function */
1952         uint32_t pkt_count;
1953         int i;
1954
1955         ereq = &req->ext_req;
1956         req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1957         pkt_count = req_mapped->pio_pkt_count;
1958
1959         /* Create the isoc descs */
1960         if (atomic_alloc) {
1961                 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1962         } else {
1963                 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1964         }
1965
1966         if (!ipds) {
1967                 DWC_ERROR("Failed to allocate isoc descriptors");
1968                 return -DWC_E_NO_MEMORY;
1969         }
1970
1971         /* Initialize the extended request fields */
1972         ereq->per_io_frame_descs = ipds;
1973         ereq->error_count = 0;
1974         ereq->pio_alloc_pkt_count = pkt_count;
1975         ereq->pio_pkt_count = pkt_count;
1976         ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1977
1978         /* Init the Isoc descriptors */
1979         for (i = 0; i < pkt_count; i++) {
1980                 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1981                 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1982                 ipds[i].status = req_mapped->per_io_frame_descs[i].status;      /* 0 */
1983                 ipds[i].actual_length =
1984                     req_mapped->per_io_frame_descs[i].actual_length;
1985         }
1986
1987         return 0;
1988 }
1989
1990 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1991 {
1992         struct dwc_iso_pkt_desc_port *xfd = NULL;
1993         int i;
1994
1995         DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1996         DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1997         DWC_DEBUG("error_count=%d", ereq->error_count);
1998         DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1999         DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
2000         DWC_DEBUG("res=%d", ereq->res);
2001
2002         for (i = 0; i < ereq->pio_pkt_count; i++) {
2003                 xfd = &ereq->per_io_frame_descs[i];
2004                 DWC_DEBUG("FD #%d", i);
2005
2006                 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
2007                 DWC_DEBUG("xfd->length=%d", xfd->length);
2008                 DWC_DEBUG("xfd->offset=%d", xfd->offset);
2009                 DWC_DEBUG("xfd->status=%d", xfd->status);
2010         }
2011 }
2012
2013 /**
2014  * Queues an extended (per-packet) ISOC request on the given endpoint.
2015  */
2016 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2017                               uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2018                               int zero, void *req_handle, int atomic_alloc,
2019                               void *ereq_nonport)
2020 {
2021         dwc_otg_pcd_request_t *req = NULL;
2022         dwc_otg_pcd_ep_t *ep;
2023         dwc_irqflags_t flags;
2024         int res;
2025
2026         ep = get_ep_from_handle(pcd, ep_handle);
2027         if (!ep) {
2028                 DWC_WARN("bad ep\n");
2029                 return -DWC_E_INVALID;
2030         }
2031
2032         /* We support this extension only for DDMA mode */
2033         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2034                 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2035                         return -DWC_E_INVALID;
2036
2037         /* Create a dwc_otg_pcd_request_t object */
2038         if (atomic_alloc) {
2039                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2040         } else {
2041                 req = DWC_ALLOC(sizeof(*req));
2042         }
2043
2044         if (!req) {
2045                 return -DWC_E_NO_MEMORY;
2046         }
2047
2048         /* Create the Isoc descs for this request which shall be the exact match
2049          * of the structure sent to us from the non-portable logic */
2050         res =
2051             dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2052         if (res) {
2053                 DWC_WARN("Failed to init the Isoc descriptors");
2054                 DWC_FREE(req);
2055                 return res;
2056         }
2057
2058         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2059
2060         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2061         req->buf = buf;
2062         req->dma = dma_buf;
2063         req->length = buflen;
2064         req->sent_zlp = zero;
2065         req->priv = req_handle;
2066
2067         /* DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags); */
2068         ep->dwc_ep.dma_addr = dma_buf;
2069         ep->dwc_ep.start_xfer_buff = buf;
2070         ep->dwc_ep.xfer_buff = buf;
2071         ep->dwc_ep.xfer_len = 0;
2072         ep->dwc_ep.xfer_count = 0;
2073         ep->dwc_ep.sent_zlp = 0;
2074         ep->dwc_ep.total_len = buflen;
2075
2076         /* Add this request to the tail */
2077         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2078         ep->dwc_ep.xiso_queued_xfers++;
2079
2080         /* DWC_DEBUG("CP_0"); */
2081         /* DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags); */
2082         /* prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport); */
2083         /* prn_ext_request(&req->ext_req); */
2084
2085         /* DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags); */
2086
2087         /* If the request's tr_sub_flags == ASAP, check if there is any active
2088          * transfer for this endpoint. If there are no active transfers, get the
2089          * first entry from the queue and start that transfer.
2090          */
2091         if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2092                 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2093                 if (res) {
2094                         DWC_WARN("Failed to start the next Isoc transfer");
2095                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2096                         DWC_FREE(req);
2097                         return res;
2098                 }
2099         }
2100
2101         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2102         return 0;
2103 }
2104
2105 #endif
2106 /* END ifdef DWC_UTE_PER_IO ***************************************************/
2107 int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t *pcd, void *ep_handle,
2108                          uint8_t *buf, dwc_dma_t dma_buf, uint32_t buflen,
2109                          int zero, void *req_handle, int atomic_alloc)
2110 {
2111         dwc_irqflags_t flags;
2112         dwc_otg_pcd_request_t *req;
2113         dwc_otg_pcd_ep_t *ep;
2114         uint32_t max_transfer;
2115
2116         ep = get_ep_from_handle(pcd, ep_handle);
2117         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2118                 DWC_WARN("bad ep\n");
2119                 return -DWC_E_INVALID;
2120         }
2121
2122         if (atomic_alloc) {
2123                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2124         } else {
2125                 req = DWC_ALLOC(sizeof(*req));
2126         }
2127
2128         if (!req) {
2129                 return -DWC_E_NO_MEMORY;
2130         }
2131         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2132         if (!GET_CORE_IF(pcd)->core_params->opt) {
2133                 if (ep->dwc_ep.num != 0) {
2134                         DWC_ERROR("queue req %p, len %d buf %p\n",
2135                                   req_handle, buflen, buf);
2136                 }
2137         }
2138
2139         req->buf = buf;
2140         req->dma = dma_buf;
2141         req->length = buflen;
2142         req->sent_zlp = zero;
2143         req->priv = req_handle;
2144         req->dw_align_buf = NULL;
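        /*
         * In buffer-DMA mode the core needs 32-bit aligned buffers; if the
         * supplied DMA address is not aligned, bounce the transfer through a
         * separately allocated aligned buffer.
         */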
2145         if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
2146             && !GET_CORE_IF(pcd)->dma_desc_enable)
2147                 req->dw_align_buf = DWC_DMA_ALLOC(buflen,
2148                                                   &req->dw_align_buf_dma);
2149         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2150
2151         /*
2152          * After adding the request to the queue: for IN ISOC wait for the
2153          * "IN Token Received when TX FIFO Empty" interrupt, for OUT ISOC wait
2154          * for the "OUT Token Received when EP Disabled" interrupt, to obtain
2155          * the starting (odd/even) microframe before starting the transfer.
2156          */
2157         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
2158                 if (req != 0) {
2159                         depctl_data_t depctl = {.d32 =
2160                                     DWC_READ_REG32(&pcd->core_if->
2161                                                    dev_if->in_ep_regs[ep->
2162                                                                       dwc_ep.
2163                                                                       num]->diepctl)
2164                         };
2165                         ++pcd->request_pending;
2166
2167                         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2168                         if (ep->dwc_ep.is_in) {
2169                                 depctl.b.cnak = 1;
2170                                 DWC_WRITE_REG32(&pcd->core_if->
2171                                                 dev_if->in_ep_regs[ep->dwc_ep.
2172                                                                    num]->diepctl,
2173                                                 depctl.d32);
2174                         }
2175
2176                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2177                 }
2178                 return 0;
2179         }
2180
2181         /*
2182          * For EP0 IN without premature status, is a ZLP required?
2183          */
2184         if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
2185                 DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
2186                 /* _req->zero = 1; */
2187         }
2188
2189         /* Start the transfer */
2190         if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
2191                 /* EP0 Transfer? */
2192                 if (ep->dwc_ep.num == 0) {
2193                         switch (pcd->ep0state) {
2194                         case EP0_IN_DATA_PHASE:
2195                                 DWC_DEBUGPL(DBG_PCD,
2196                                             "%s ep0: EP0_IN_DATA_PHASE\n",
2197                                             __func__);
2198                                 break;
2199
2200                         case EP0_OUT_DATA_PHASE:
2201                                 DWC_DEBUGPL(DBG_PCD,
2202                                             "%s ep0: EP0_OUT_DATA_PHASE\n",
2203                                             __func__);
2204                                 if (pcd->request_config) {
2205                                         /* Complete STATUS PHASE */
2206                                         ep->dwc_ep.is_in = 1;
2207                                         pcd->ep0state = EP0_IN_STATUS_PHASE;
2208                                 }
2209                                 break;
2210
2211                         case EP0_IN_STATUS_PHASE:
2212                                 DWC_DEBUGPL(DBG_PCD,
2213                                             "%s ep0: EP0_IN_STATUS_PHASE\n",
2214                                             __func__);
2215                                 break;
2216
2217                         default:
2218                                 DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
2219                                             pcd->ep0state);
2220                                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2221                                 return -DWC_E_SHUTDOWN;
2222                         }
2223
2224                         ep->dwc_ep.dma_addr = dma_buf;
2225                         ep->dwc_ep.start_xfer_buff = buf;
2226                         ep->dwc_ep.xfer_buff = buf;
2227                         ep->dwc_ep.xfer_len = buflen;
2228                         ep->dwc_ep.xfer_count = 0;
2229                         ep->dwc_ep.sent_zlp = 0;
2230                         ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
2231
2232                         if (zero) {
2233                                 if ((ep->dwc_ep.xfer_len %
2234                                      ep->dwc_ep.maxpacket == 0)
2235                                     && (ep->dwc_ep.xfer_len != 0)) {
2236                                         ep->dwc_ep.sent_zlp = 1;
2237                                 }
2238
2239                         }
2240
2241                         dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
2242                                                    &ep->dwc_ep);
2243                 }               /* non-ep0 endpoints */
2244                 else {
2245 #ifdef DWC_UTE_CFI
2246                         if (ep->dwc_ep.buff_mode != BM_STANDARD) {
2247                                 /* store the request length */
2248                                 ep->dwc_ep.cfi_req_len = buflen;
2249                                 pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
2250                                                                 ep, req);
2251                         } else {
2252 #endif
2253                                 max_transfer =
2254                                     GET_CORE_IF(ep->pcd)->
2255                                     core_params->max_transfer_size;
2256
2257                                 /* Setup and start the Transfer */
2258                                 if (req->dw_align_buf) {
2259                                         if (ep->dwc_ep.is_in)
2260                                                 dwc_memcpy(req->dw_align_buf,
2261                                                            buf, buflen);
2262                                         ep->dwc_ep.dma_addr =
2263                                             req->dw_align_buf_dma;
2264                                         ep->dwc_ep.start_xfer_buff =
2265                                             req->dw_align_buf;
2266                                         ep->dwc_ep.xfer_buff =
2267                                             req->dw_align_buf;
2268                                 } else {
2269                                         ep->dwc_ep.dma_addr = dma_buf;
2270                                         ep->dwc_ep.start_xfer_buff = buf;
2271                                         ep->dwc_ep.xfer_buff = buf;
2272                                 }
2273                                 ep->dwc_ep.xfer_len = 0;
2274                                 ep->dwc_ep.xfer_count = 0;
2275                                 ep->dwc_ep.sent_zlp = 0;
2276                                 ep->dwc_ep.total_len = buflen;
2277
2278                                 ep->dwc_ep.maxxfer = max_transfer;
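                                /*
                                 * In descriptor-DMA mode, clamp a single
                                 * transfer to the descriptor limit (rounded
                                 * down to a 4-byte multiple for OUT), then
                                 * trim maxxfer to a multiple of maxpacket so
                                 * it ends on a packet boundary.
                                 */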
2279                                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
2280                                         uint32_t out_max_xfer =
2281                                             DDMA_MAX_TRANSFER_SIZE -
2282                                             (DDMA_MAX_TRANSFER_SIZE % 4);
2283                                         if (ep->dwc_ep.is_in) {
2284                                                 if (ep->dwc_ep.maxxfer >
2285                                                     DDMA_MAX_TRANSFER_SIZE) {
2286                                                         ep->dwc_ep.maxxfer =
2287                                                             DDMA_MAX_TRANSFER_SIZE;
2288                                                 }
2289                                         } else {
2290                                                 if (ep->dwc_ep.maxxfer >
2291                                                     out_max_xfer) {
2292                                                         ep->dwc_ep.maxxfer =
2293                                                             out_max_xfer;
2294                                                 }
2295                                         }
2296                                 }
2297                                 if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
2298                                         ep->dwc_ep.maxxfer -=
2299                                             (ep->dwc_ep.maxxfer %
2300                                              ep->dwc_ep.maxpacket);
2301                                 }
2302
2303                                 if (zero) {
2304                                         if ((ep->dwc_ep.total_len %
2305                                              ep->dwc_ep.maxpacket == 0)
2306                                             && (ep->dwc_ep.total_len != 0)) {
2307                                                 ep->dwc_ep.sent_zlp = 1;
2308                                         }
2309                                 }
2310 #ifdef DWC_UTE_CFI
2311                         }
2312 #endif
2313                         dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
2314                                                   &ep->dwc_ep);
2315                 }
2316         }
2317
2318         if (req != 0) {
2319                 ++pcd->request_pending;
2320                 DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2321                 if (ep->dwc_ep.is_in && ep->stopped
2322                     && !(GET_CORE_IF(pcd)->dma_enable)) {
2323                         /** @todo NGS Create a function for this. */
2324                         diepmsk_data_t diepmsk = {.d32 = 0 };
2325                         diepmsk.b.intktxfemp = 1;
2326                         if (GET_CORE_IF(pcd)->multiproc_int_enable) {
2327                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2328                                                  dev_global_regs->
2329                                                  diepeachintmsk[ep->dwc_ep.num],
2330                                                  0, diepmsk.d32);
2331                         } else {
2332                                 DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
2333                                                  dev_global_regs->diepmsk, 0,
2334                                                  diepmsk.d32);
2335                         }
2336
2337                 }
2338         }
2339         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2340
2341         return 0;
2342 }
2343
2344 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t *pcd, void *ep_handle,
2345                            void *req_handle)
2346 {
2347         dwc_irqflags_t flags;
2348         dwc_otg_pcd_request_t *req;
2349         dwc_otg_pcd_ep_t *ep;
2350
2351         ep = get_ep_from_handle(pcd, ep_handle);
2352         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2353                 DWC_WARN("bad argument\n");
2354                 return -DWC_E_INVALID;
2355         }
2356
2357         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2358
2359         /* make sure it's actually queued on this endpoint */
2360         DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2361                 if (req->priv == (void *)req_handle) {
2362                         break;
2363                 }
2364         }
2365
2366         if (req->priv != (void *)req_handle) {
2367                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2368                 return -DWC_E_INVALID;
2369         }
2370
2371         if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2372                 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2373         } else {
2374                 req = NULL;
2375         }
2376
2377         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2378
2379         return req ? 0 : -DWC_E_SHUTDOWN;
2380
2381 }
2382
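/**
 * Sets or clears halt on an endpoint.  The value argument selects the
 * operation performed below: 0 clears the stall, 1 sets it, 2 only resets
 * stall_clear_flag, and 3 sets stall_clear_flag before stalling, so the
 * later clear-halt handling can distinguish the two stall cases.
 */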
2383 int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t *pcd, void *ep_handle, int value)
2384 {
2385         dwc_otg_pcd_ep_t *ep;
2386         dwc_irqflags_t flags;
2387         int retval = 0;
2388
2389         ep = get_ep_from_handle(pcd, ep_handle);
2390
2391         if (!ep || (!ep->desc && ep != &pcd->ep0) ||
2392             (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
2393                 DWC_WARN("%s, bad ep\n", __func__);
2394                 return -DWC_E_INVALID;
2395         }
2396
2397         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2398         if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
2399                 DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
2400                          ep->dwc_ep.is_in ? "IN" : "OUT");
2401                 retval = -DWC_E_AGAIN;
2402         } else if (value == 0) {
2403                 ep->dwc_ep.stall_clear_flag = 0;
2404                 dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2405         } else if (value == 1) {
2406 stall:
2407                 if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
2408                         dtxfsts_data_t txstatus;
2409                         fifosize_data_t txfifosize;
2410
2411                         txfifosize.d32 =
2412                             DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->
2413                                            dtxfsiz[ep->dwc_ep.tx_fifo_num]);
2414                         txstatus.d32 =
2415                             DWC_READ_REG32(&GET_CORE_IF(pcd)->dev_if->
2416                                            in_ep_regs[ep->dwc_ep.num]->dtxfsts);
2417
2418                         if (txstatus.b.txfspcavail < txfifosize.b.depth) {
2419                                 DWC_WARN("%s() Data In Tx Fifo\n", __func__);
2420                                 retval = -DWC_E_AGAIN;
2421                         } else {
2422                                 if (ep->dwc_ep.num == 0) {
2423                                         pcd->ep0state = EP0_STALL;
2424                                 }
2425
2426                                 ep->stopped = 1;
2427                                 dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
2428                                                      &ep->dwc_ep);
2429                         }
2430                 } else {
2431                         if (ep->dwc_ep.num == 0) {
2432                                 pcd->ep0state = EP0_STALL;
2433                         }
2434
2435                         ep->stopped = 1;
2436                         dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
2437                 }
2438         } else if (value == 2) {
2439                 ep->dwc_ep.stall_clear_flag = 0;
2440         } else if (value == 3) {
2441                 ep->dwc_ep.stall_clear_flag = 1;
2442                 goto stall;
2443         }
2444
2445         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2446
2447         return retval;
2448 }
2449
2450 /**
2451  * This function initiates remote wakeup of the host from suspend state.
2452  */
2453 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t *pcd, int set)
2454 {
2455         dctl_data_t dctl = { 0 };
2456         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2457         dsts_data_t dsts;
2458
2459         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
2460         if (!dsts.b.suspsts) {
2461                 DWC_WARN("Remote wakeup while not in suspend state\n");
2462         }
2463         /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2464         if (pcd->remote_wakeup_enable) {
2465                 if (set) {
2466
2467                         if (core_if->adp_enable) {
2468                                 gpwrdn_data_t gpwrdn;
2469
2470                                 dwc_otg_adp_probe_stop(core_if);
2471
2472                                 /* Mask SRP detected interrupt from Power Down Logic */
2473                                 gpwrdn.d32 = 0;
2474                                 gpwrdn.b.srp_det_msk = 1;
2475                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
2476                                                  gpwrdn, gpwrdn.d32, 0);
2477
2478                                 /* Disable Power Down Logic */
2479                                 gpwrdn.d32 = 0;
2480                                 gpwrdn.b.pmuactv = 1;
2481                                 DWC_MODIFY_REG32(&core_if->core_global_regs->
2482                                                  gpwrdn, gpwrdn.d32, 0);
2483
2484                                 /*
2485                                  * Initialize the Core for Device mode.
2486                                  */
2487                                 core_if->op_state = B_PERIPHERAL;
2488                                 dwc_otg_core_init(core_if);
2489                                 dwc_otg_enable_global_interrupts(core_if);
2490                                 cil_pcd_start(core_if);
2491
2492                                 dwc_otg_initiate_srp(core_if);
2493                         }
2494
2495                         dctl.b.rmtwkupsig = 1;
2496                         DWC_MODIFY_REG32(&core_if->dev_if->
2497                                          dev_global_regs->dctl, 0, dctl.d32);
2498                         DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2499
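                        /*
                         * The USB 2.0 spec requires resume signaling to be
                         * driven for 1-15 ms; keep RmtWkUpSig asserted for
                         * 2 ms before clearing it.
                         */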
2500                         dwc_mdelay(2);
2501                         DWC_MODIFY_REG32(&core_if->dev_if->
2502                                          dev_global_regs->dctl, dctl.d32, 0);
2503                         DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2504                 }
2505         } else {
2506                 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2507         }
2508 }
2509
2510 #ifdef CONFIG_USB_DWC_OTG_LPM
2511 /**
2512  * This function initiates remote wakeup of the host from L1 sleep state.
2513  */
2514 void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t *pcd, int set)
2515 {
2516         glpmcfg_data_t lpmcfg;
2517         pcgcctl_data_t pcgcctl = {.d32 = 0 };
2518
2519         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2520
2521         lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2522
2523         /* Check if we are in L1 state */
2524         if (!lpmcfg.b.prt_sleep_sts) {
2525                 DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
2526                 return;
2527         }
2528
2529         /* Check if host allows remote wakeup */
2530         if (!lpmcfg.b.rem_wkup_en) {
2531                 DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
2532                 return;
2533         }
2534
2535         /* Check if Resume OK */
2536         if (!lpmcfg.b.sleep_state_resumeok) {
2537                 DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
2538                 return;
2539         }
2540
2541         lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
2542         lpmcfg.b.en_utmi_sleep = 0;
2543         lpmcfg.b.hird_thres &= (~(1 << 4));
2544
2545         /* Clear Enbl_L1Gating bit. */
2546         pcgcctl.b.enbl_sleep_gating = 1;
2547         DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32, 0);
2548
2549         DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);
2550
2551         if (set) {
2552                 dctl_data_t dctl = {.d32 = 0 };
2553                 dctl.b.rmtwkupsig = 1;
2554                 /* Set RmtWkUpSig bit to start remote wakeup signaling.
2555                  * Hardware will automatically clear this bit.
2556                  */
2557                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2558                                  0, dctl.d32);
2559                 DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2560         }
2561
2562 }
2563 #endif
2564
2565 /**
2566  * Performs remote wakeup.
2567  */
2568 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t *pcd, int set)
2569 {
2570         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2571         dwc_irqflags_t flags;
2572         if (dwc_otg_is_device_mode(core_if)) {
2573                 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2574 #ifdef CONFIG_USB_DWC_OTG_LPM
2575                 if (core_if->lx_state == DWC_OTG_L1) {
2576                         dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2577                 } else {
2578 #endif
2579                         dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2580 #ifdef CONFIG_USB_DWC_OTG_LPM
2581                 }
2582 #endif
2583                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2584         }
2585         return;
2586 }
2587
2588 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t *pcd, int no_of_usecs)
2589 {
2590         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2591         dctl_data_t dctl = { 0 };
2592
2593         if (dwc_otg_is_device_mode(core_if)) {
2594                 dctl.b.sftdiscon = 1;
2595                 DWC_PRINTF("Soft disconnect for %d useconds\n", no_of_usecs);
2596                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0,
2597                                  dctl.d32);
2598                 dwc_udelay(no_of_usecs);
2599                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
2600                                  dctl.d32, 0);
2601
2602         } else {
2603                 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
2604         }
2605         return;
2606
2607 }
2608
2609 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t *pcd)
2610 {
2611         dsts_data_t dsts;
2612         gotgctl_data_t gotgctl;
2613
2614         /*
2615          * This function starts the Protocol if no session is in progress. If
2616          * a session is already in progress, but the device is suspended,
2617          * remote wakeup signaling is started.
2618          */
2619
2620         /* Check if valid session */
2621         gotgctl.d32 =
2622             DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2623         if (gotgctl.b.bsesvld) {
2624                 /* Check if suspend state */
2625                 dsts.d32 =
2626                     DWC_READ_REG32(&
2627                                    (GET_CORE_IF(pcd)->dev_if->dev_global_regs->
2628                                     dsts));
2629                 if (dsts.b.suspsts) {
2630                         dwc_otg_pcd_remote_wakeup(pcd, 1);
2631                 }
2632         } else {
2633                 dwc_otg_pcd_initiate_srp(pcd);
2634         }
2635
2636         return 0;
2637
2638 }
2639
2640 /**
2641  * Implement Soft-Connect and Soft-Disconnect function
2642  */
2643
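/*
 * Bit 1 of DCTL is SftDiscon: clearing it enables the D+ pull-up (soft
 * connect), setting it removes it (soft disconnect).  The literal 2 used
 * below is that bit mask.
 */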
2644 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t *pcd)
2645 {
2646         if (pcd)
2647                 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->
2648                                    dev_global_regs->dctl), 2, 0);
2649 }
2650
2651 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t *pcd)
2652 {
2653         if (pcd)
2654                 DWC_MODIFY_REG32(&(GET_CORE_IF(pcd)->dev_if->
2655                                    dev_global_regs->dctl), 0, 2);
2656 }
2657
2658 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2659 {
2660         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2661         dwc_otg_disable_global_interrupts(core_if);
2662         dwc_otg_core_init(core_if);
2663         dwc_otg_pcd_reinit(pcd);
2664         dwc_otg_core_dev_init(core_if);
2665         dwc_otg_enable_global_interrupts(core_if);
2666 }
2667
2668 /**
2669  * Start the SRP timer to detect when the SRP does not complete within
2670  * 6 seconds.
2671  *
2672  * @param pcd the pcd structure.
2673  */
2674 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t *pcd)
2675 {
2676         dwc_irqflags_t flags;
2677         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2678         dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2679         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2680 }
2681
2682 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t *pcd)
2683 {
2684         return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
2685 }
2686
2687 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t *pcd)
2688 {
2689         return GET_CORE_IF(pcd)->core_params->lpm_enable;
2690 }
2691
2692 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t *pcd)
2693 {
2694         return GET_CORE_IF(pcd)->core_params->besl_enable;
2695 }
2696
2697 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t *pcd)
2698 {
2699         return GET_CORE_IF(pcd)->core_params->baseline_besl;
2700 }
2701
2702 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t *pcd)
2703 {
2704         return GET_CORE_IF(pcd)->core_params->deep_besl;
2705 }
2706
2707 uint32_t get_b_hnp_enable(dwc_otg_pcd_t *pcd)
2708 {
2709         return pcd->b_hnp_enable;
2710 }
2711
2712 uint32_t get_a_hnp_support(dwc_otg_pcd_t *pcd)
2713 {
2714         return pcd->a_hnp_support;
2715 }
2716
2717 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t *pcd)
2718 {
2719         return pcd->a_alt_hnp_support;
2720 }
2721
2722 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t *pcd)
2723 {
2724         return pcd->remote_wakeup_enable;
2725 }
2726
2727 #endif /* DWC_HOST_ONLY */